code stringlengths 3-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3-1.05M |
---|---|---|---|---|---|
# -*- coding: utf8 -*-
#
# Created by 'myth' on 2/19/16
import matplotlib as mpl
import settings
mpl.use('TkAgg')
| myth/trashcan | it3708/project3/modules/__init__.py | Python | gpl-2.0 | 116 |
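The module above only pins the matplotlib backend. Backend selection only takes effect if it happens before pyplot is imported anywhere in the process; a minimal sketch of that ordering (assuming a Tk-capable environment) is:

```python
# Select the backend before any pyplot import; calling mpl.use() afterwards
# has no effect (or warns/raises, depending on the matplotlib version).
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt  # safe to import only after the backend is set
```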
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('publicbody', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='jurisdiction',
options={'ordering': ('rank', 'name'), 'verbose_name': 'Jurisdiction', 'verbose_name_plural': 'Jurisdictions'},
),
migrations.AddField(
model_name='publicbody',
name='file_index',
field=models.CharField(max_length=1024, blank=True),
),
migrations.AddField(
model_name='publicbody',
name='org_chart',
field=models.CharField(max_length=1024, blank=True),
),
]
| stefanw/froide | froide/publicbody/migrations/0002_auto_20151127_1754.py | Python | mit | 792 |
#!/usr/bin/env python
import click
import collections
import decimal
import requests
import sys
import yaml
FIAT_SUPPORTED = [
"AUD",
"BRL",
"CAD",
"CHF",
"CNY",
"EUR",
"GBP",
"HKD",
"IDR",
"INR",
"JPY",
"KRW",
"MXN",
"RUB",
"USD",
]
@click.command()
@click.option("--fiat-symbol", default="USD",
type=click.Choice(FIAT_SUPPORTED),
help="Convert value into this fiat currency.")
@click.option("--yaml-assets", multiple=True,
type=click.File(mode="rb"),
help="Path to a YAML file defining some of your assets")
def main(fiat_symbol=None, yaml_assets=None):
decimal.getcontext().prec = 19
params = {}
params["limit"] = 0
assets = collections.defaultdict(decimal.Decimal)
for assets_fh in yaml_assets:
vals = yaml.load(assets_fh)
if "assets" not in vals:
print("Invalid YAML. See example.yaml.")
sys.exit(1)
i = 0
for asset in vals["assets"]:
for expectation in ["symbol", "count"]:
if expectation not in asset:
print("Invalid YAML: expected key '%s' on asset. See example.yaml." % (expectation,))
sys.exit(1)
symbol = asset["symbol"]
count = asset["count"]
label = asset["label"] if "label" in asset else ""
assets[(symbol, label)] = assets[(symbol, label)] + decimal.Decimal(count)
if fiat_symbol != "USD":
params["convert"] = fiat_symbol
req = requests.get("https://api.coinmarketcap.com/v1/ticker/",
params=params)
ticker_list = req.json()
tickers = dict((t["symbol"], t) for t in ticker_list)
print("Symbol Total Price %3s Total %3s Pct Ch 1h Pct 1d Pct 1w Fiat 1h Fiat 1d Fiat 1w Label" % (fiat_symbol, fiat_symbol))
print("====== ===================== ===================== ===================== ===================== ===================== ===================== ===================== ===================== =====================")
total_fiat, total_fiat_1h, total_fiat_1d, total_fiat_1w = decimal.Decimal(0),decimal.Decimal(0),decimal.Decimal(0),decimal.Decimal(0)
rows = []
for symbol, label in assets.keys():
total = assets[(symbol, label)]
if symbol not in tickers:
print("%s\t%s\tUnknown symbol." %(symbol, total))
continue
tick = tickers[symbol]
price = decimal.Decimal(tick["price_%s" % (fiat_symbol.lower())])
in_fiat = total*price
pct_1h = decimal.Decimal(tick["percent_change_1h"]) / 100
pct_1d = decimal.Decimal(tick["percent_change_24h"]) / 100
pct_1w = decimal.Decimal(tick["percent_change_7d"]) / 100
price_1h = price-(price / (1 + pct_1h))
price_1d = price-(price / (1 + pct_1d))
price_1w = price-(price / (1 + pct_1w))
total_1h = total*price_1h
total_1d = total*price_1d
total_1w = total*price_1w
total_fiat = total_fiat + in_fiat
total_fiat_1h = total_fiat_1h + total_1h
total_fiat_1d = total_fiat_1d + total_1d
total_fiat_1w = total_fiat_1w + total_1w
nums = {
"symbol": symbol,
"total": total,
"price": price,
"in_fiat": in_fiat,
"pct_1h": pct_1h,
"pct_1d": pct_1d,
"pct_1w": pct_1w,
"total_1h": total_1h,
"total_1d": total_1d,
"total_1w": total_1w,
"label": label,
}
rows.append(nums)
sorted_rows = [y for x, y in sorted([(0-nums["in_fiat"], nums) for nums in rows])]
for row in sorted_rows:
print("%6s %21s %21s %21s %21s %21s %21s %21s %21s %21s %s" %(
row["symbol"], row["total"], row["price"], row["in_fiat"].quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN), row["pct_1h"]*100, row["pct_1d"]*100, row["pct_1w"]*100, row["total_1h"], row["total_1d"], row["total_1w"], row["label"]))
if total_fiat > 0:
print("%6s %21s %21s %21s %21s %21s %21s %21s %21s %21s %s" %(
"TOTAL", "N/A", "N/A", total_fiat.quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN), (1-(total_fiat-total_fiat_1h)/total_fiat)*100, (1-(total_fiat-total_fiat_1d)/total_fiat)*100, (1-(total_fiat-total_fiat_1w)/total_fiat)*100, total_fiat_1h, total_fiat_1d, total_fiat_1w, ""))
if __name__ == "__main__":
main()
| nicodemus26/altcoin_value | altcoin_value/main.py | Python | gpl-3.0 | 4,591 |
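The script above reads one or more YAML files, each with an `assets` list of `symbol`/`count` (and optional `label`) entries, and prices them via the CoinMarketCap ticker. A minimal sketch of the expected file layout, with hypothetical values standing in for the `example.yaml` the error messages mention:

```python
# Hypothetical assets file matching the keys main() validates above.
import yaml

EXAMPLE_ASSETS_YAML = """
assets:
  - symbol: BTC
    count: 0.5
    label: cold storage
  - symbol: ETH
    count: 10
"""

vals = yaml.safe_load(EXAMPLE_ASSETS_YAML)
assert "assets" in vals
assert all("symbol" in a and "count" in a for a in vals["assets"])
```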
"""
Utilities for working with pandas objects.
"""
from contextlib import contextmanager
from itertools import product
import operator as op
import warnings
import pandas as pd
from distutils.version import StrictVersion
pandas_version = StrictVersion(pd.__version__)
def july_5th_holiday_observance(datetime_index):
return datetime_index[datetime_index.year != 2013]
def explode(df):
"""
Take a DataFrame and return a triple of
(df.index, df.columns, df.values)
"""
return df.index, df.columns, df.values
def _time_to_micros(time):
"""Convert a time into microseconds since midnight.
Parameters
----------
time : datetime.time
The time to convert.
Returns
-------
us : int
The number of microseconds since midnight.
Notes
-----
This does not account for leap seconds or daylight savings.
"""
seconds = time.hour * 60 * 60 + time.minute * 60 + time.second
return 1000000 * seconds + time.microsecond
_opmap = dict(zip(
product((True, False), repeat=3),
product((op.le, op.lt), (op.le, op.lt), (op.and_, op.or_)),
))
def mask_between_time(dts, start, end, include_start=True, include_end=True):
"""Return a mask of all of the datetimes in ``dts`` that are between
``start`` and ``end``.
Parameters
----------
dts : pd.DatetimeIndex
The index to mask.
start : time
Mask away times less than the start.
end : time
Mask away times greater than the end.
include_start : bool, optional
Inclusive on ``start``.
include_end : bool, optional
Inclusive on ``end``.
Returns
-------
mask : np.ndarray[bool]
A bool array masking ``dts``.
See Also
--------
:meth:`pandas.DatetimeIndex.indexer_between_time`
"""
# This function is adapted from
# `pandas.Datetime.Index.indexer_between_time` which was originally
# written by Wes McKinney, Chang She, and Grant Roch.
time_micros = dts._get_time_micros()
start_micros = _time_to_micros(start)
end_micros = _time_to_micros(end)
left_op, right_op, join_op = _opmap[
bool(include_start),
bool(include_end),
start_micros <= end_micros,
]
return join_op(
left_op(start_micros, time_micros),
right_op(time_micros, end_micros),
)
def find_in_sorted_index(dts, dt):
"""
Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
    LookupError
If dt is not in ``dts``.
"""
ix = dts.searchsorted(dt)
    if ix == len(dts) or dts[ix] != dt:
raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
return ix
def nearest_unequal_elements(dts, dt):
"""
Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
    When ``dt`` is greater than any element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds.
"""
if not dts.is_unique:
raise ValueError("dts must be unique")
if not dts.is_monotonic_increasing:
raise ValueError("dts must be sorted in increasing order")
if not len(dts):
return None, None
sortpos = dts.searchsorted(dt, side='left')
try:
sortval = dts[sortpos]
except IndexError:
# dt is greater than any value in the array.
return dts[-1], None
if dt < sortval:
lower_ix = sortpos - 1
upper_ix = sortpos
elif dt == sortval:
lower_ix = sortpos - 1
upper_ix = sortpos + 1
else:
lower_ix = sortpos
upper_ix = sortpos + 1
lower_value = dts[lower_ix] if lower_ix >= 0 else None
upper_value = dts[upper_ix] if upper_ix < len(dts) else None
return lower_value, upper_value
def timedelta_to_integral_seconds(delta):
"""
Convert a pd.Timedelta to a number of seconds as an int.
"""
return int(delta.total_seconds())
def timedelta_to_integral_minutes(delta):
"""
Convert a pd.Timedelta to a number of minutes as an int.
"""
return timedelta_to_integral_seconds(delta) // 60
@contextmanager
def ignore_pandas_nan_categorical_warning():
with warnings.catch_warnings():
        # Pandas >= 0.18 doesn't like null-ish values in categories, but
# avoiding that requires a broader change to how missing values are
# handled in pipeline, so for now just silence the warning.
warnings.filterwarnings(
'ignore',
category=FutureWarning,
)
yield
_INDEXER_NAMES = [
'_' + name for (name, _) in pd.core.indexing.get_indexers_list()
]
def clear_dataframe_indexer_caches(df):
"""
Clear cached attributes from a pandas DataFrame.
By default pandas memoizes indexers (`iloc`, `loc`, `ix`, etc.) objects on
DataFrames, resulting in refcycles that can lead to unexpectedly long-lived
DataFrames. This function attempts to clear those cycles by deleting the
cached indexers from the frame.
Parameters
----------
df : pd.DataFrame
"""
for attr in _INDEXER_NAMES:
try:
delattr(df, attr)
except AttributeError:
pass
| florentchandelier/zipline | zipline/utils/pandas_utils.py | Python | apache-2.0 | 5,926 |
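As a quick illustration of the lookup helpers above, `nearest_unequal_elements` brackets a timestamp with its strict neighbours; a minimal sketch with hypothetical dates:

```python
# Minimal sketch: bracket a date with its nearest strict neighbours.
import pandas as pd

dts = pd.DatetimeIndex(['2014-01-02', '2014-01-06', '2014-01-09'])
before, after = nearest_unequal_elements(dts, pd.Timestamp('2014-01-06'))
# before == Timestamp('2014-01-02'), after == Timestamp('2014-01-09')
```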
from sys import maxsize
class Contact:
def __init__(self, firstname=None, lastname=None, nick=None, middlename=None, title=None, company=None, address=None, home_tel=None, mob_tel=None,
work_tel=None, fax=None, email=None, email2=None, homepage=None, birthday=None, id=None, secondary_tel=None, all_phones_from_home_page=None,
all_emails_from_home_page=None):
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.nick = nick
self.title = title
self.company = company
self.address = address
self.home_tel = home_tel
self.mob_tel = mob_tel
self.work_tel = work_tel
self.secondary_tel = secondary_tel
self.fax = fax
self.email = email
self.email2 = email2
self.homepage = homepage
self.birthday = birthday
self.id = id
self.all_phones_from_home_page=all_phones_from_home_page
self.all_emails_from_home_page=all_emails_from_home_page
def __repr__(self):
return "%s:%s:%s:%s:%s:%s:%s:%s:%s:%s" % (self.id, self.lastname, self.firstname, self.middlename, self.nick, self. title, self. company, self.address, self.home_tel, self.mob_tel)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.lastname == other.lastname and self.firstname == other.firstname
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| goeliv/python_training | model/contact.py | Python | apache-2.0 | 1,589 |
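The `id_or_max` helper above exists so that contacts lacking a database id sort after those that have one; a minimal usage sketch with hypothetical contacts:

```python
# Contacts without an id sort last because id_or_max falls back to sys.maxsize.
contacts = [Contact(firstname="Ann", lastname="Lee", id="12"),
            Contact(firstname="Bo", lastname="Xu")]  # id is None
contacts.sort(key=Contact.id_or_max)
```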
"""
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/benchmarks/bench_plot_nmf.py | Python | bsd-3-clause | 5,890 |
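`alt_nnmf` above can be smoke-tested on its own without running the full benchmark; a minimal sketch with an arbitrary non-negative random matrix:

```python
# Factor a small random non-negative matrix and report the Frobenius error.
import numpy as np

rng = np.random.RandomState(0)
V = np.abs(rng.standard_normal((20, 15)))
W, H = alt_nnmf(V, r=5, max_iter=200, tol=1e-3)
print("Frobenius loss: %.5f" % np.linalg.norm(V - np.dot(W, H)))
```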
from aiida.orm.calculation.job import JobCalculation
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.structure import StructureData
from aiida.orm.data.array.trajectory import TrajectoryData
from aiida.orm.data.array import ArrayData
from aiida.common.exceptions import InputValidationError
from aiida.common.datastructures import CalcInfo, CodeInfo
from aiida.common.utils import classproperty
import numpy as np
def get_FORCE_CONSTANTS_txt(force_constants):
force_constants = force_constants.get_array('force_constants')
fc_shape = force_constants.shape
fc_txt = "%4d\n" % (fc_shape[0])
for i in range(fc_shape[0]):
for j in range(fc_shape[1]):
fc_txt += "%4d%4d\n" % (i+1, j+1)
for vec in force_constants[i][j]:
fc_txt +=("%22.15f"*3 + "\n") % tuple(vec)
return fc_txt
def get_trajectory_txt(trajectory):
cell = trajectory.get_cells()[0]
a = np.linalg.norm(cell[0])
b = np.linalg.norm(cell[1])
c = np.linalg.norm(cell[2])
alpha = np.arccos(np.dot(cell[1], cell[2])/(c*b))
gamma = np.arccos(np.dot(cell[1], cell[0])/(a*b))
beta = np.arccos(np.dot(cell[2], cell[0])/(a*c))
xhi = a
xy = b * np.cos(gamma)
xz = c * np.cos(beta)
yhi = np.sqrt(pow(b,2)- pow(xy,2))
yz = (b*c*np.cos(alpha)-xy * xz)/yhi
zhi = np.sqrt(pow(c,2)-pow(xz,2)-pow(yz,2))
    xhi = xhi + max(0.0, xy, xz, xy+xz)
    yhi = yhi + max(0.0, yz)
xlo_bound = np.min([0.0, xy, xz, xy+xz])
xhi_bound = xhi + np.max([0.0, xy, xz, xy+xz])
ylo_bound = np.min([0.0, yz])
yhi_bound = yhi + np.max([0.0, yz])
zlo_bound = 0
zhi_bound = zhi
ind = trajectory.get_array('steps')
lammps_data_file = ''
for i, position_step in enumerate(trajectory.get_positions()):
lammps_data_file += 'ITEM: TIMESTEP\n'
lammps_data_file += '{}\n'.format(ind[i])
lammps_data_file += 'ITEM: NUMBER OF ATOMS\n'
lammps_data_file += '{}\n'.format(len(position_step))
lammps_data_file += 'ITEM: BOX BOUNDS xy xz yz pp pp pp\n'
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(xlo_bound, xhi_bound, xy)
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(ylo_bound, yhi_bound, xz)
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(zlo_bound, zhi_bound, yz)
lammps_data_file += ('ITEM: ATOMS x y z\n')
for position in position_step:
lammps_data_file += '{0:20.10f} {1:20.10f} {2:20.10f}\n'.format(*position)
return lammps_data_file
def structure_to_poscar(structure):
types = [site.kind_name for site in structure.sites]
atom_type_unique = np.unique(types, return_index=True)
sort_index = np.argsort(atom_type_unique[1])
elements = np.array(atom_type_unique[0])[sort_index]
elements_count= np.diff(np.append(np.array(atom_type_unique[1])[sort_index], [len(types)]))
poscar = '# VASP POSCAR generated using aiida workflow '
poscar += '\n1.0\n'
cell = structure.cell
for row in cell:
poscar += '{0: 22.16f} {1: 22.16f} {2: 22.16f}\n'.format(*row)
poscar += ' '.join([str(e) for e in elements]) + '\n'
poscar += ' '.join([str(e) for e in elements_count]) + '\n'
poscar += 'Cartesian\n'
for site in structure.sites:
poscar += '{0: 22.16f} {1: 22.16f} {2: 22.16f}\n'.format(*site.position)
return poscar
def parameters_to_input_file(parameters_object):
parameters = parameters_object.get_dict()
input_file = ('STRUCTURE FILE POSCAR\nPOSCAR\n\n')
input_file += ('FORCE CONSTANTS\nFORCE_CONSTANTS\n\n')
input_file += ('PRIMITIVE MATRIX\n')
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[0])
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[1])
input_file += ('{} {} {} \n').format(*np.array(parameters['primitive'])[2])
input_file += ('\n')
input_file += ('SUPERCELL MATRIX PHONOPY\n')
input_file += ('{} {} {} \n').format(*np.array(parameters['supercell'])[0])
input_file += ('{} {} {} \n').format(*np.array(parameters['supercell'])[1])
input_file += ('{} {} {} \n').format(*np.array(parameters['supercell'])[2])
input_file += ('\n')
return input_file
class DynaphopyCalculation(JobCalculation):
"""
A basic plugin for calculating force constants using Phonopy.
Requirement: the node should be able to import phonopy
"""
def _init_internal_params(self):
super(DynaphopyCalculation, self)._init_internal_params()
self._INPUT_FILE_NAME = 'input_dynaphopy'
self._INPUT_TRAJECTORY = 'trajectory'
self._INPUT_CELL = 'POSCAR'
self._INPUT_FORCE_CONSTANTS = 'FORCE_CONSTANTS'
self._OUTPUT_FORCE_CONSTANTS = 'FORCE_CONSTANTS_OUT'
self._OUTPUT_FILE_NAME = 'OUTPUT'
self._OUTPUT_QUASIPARTICLES = 'quasiparticles_data.yaml'
self._default_parser = 'dynaphopy'
@classproperty
def _use_methods(cls):
"""
Additional use_* methods for the namelists class.
"""
retdict = JobCalculation._use_methods
retdict.update({
"parameters": {
'valid_types': ParameterData,
'additional_parameter': None,
'linkname': 'parameters',
'docstring': ("Use a node that specifies the dynaphopy input "
"for the namelists"),
},
"trajectory": {
'valid_types': TrajectoryData,
'additional_parameter': None,
'linkname': 'trajectory',
'docstring': ("Use a node that specifies the trajectory data "
"for the namelists"),
},
"force_constants": {
'valid_types': ArrayData,
'additional_parameter': None,
'linkname': 'force_constants',
'docstring': ("Use a node that specifies the force_constants "
"for the namelists"),
},
"structure": {
'valid_types': StructureData,
'additional_parameter': None,
'linkname': 'structure',
'docstring': "Use a node for the structure",
},
})
return retdict
def _prepare_for_submission(self,tempfolder, inputdict):
"""
This is the routine to be called when you want to create
the input files and related stuff with a plugin.
:param tempfolder: a aiida.common.folders.Folder subclass where
the plugin should put all its files.
:param inputdict: a dictionary with the input nodes, as they would
be returned by get_inputdata_dict (without the Code!)
"""
try:
parameters_data = inputdict.pop(self.get_linkname('parameters'))
except KeyError:
pass
#raise InputValidationError("No parameters specified for this "
# "calculation")
if not isinstance(parameters_data, ParameterData):
raise InputValidationError("parameters is not of type "
"ParameterData")
try:
structure = inputdict.pop(self.get_linkname('structure'))
except KeyError:
raise InputValidationError("no structure is specified for this calculation")
try:
trajectory = inputdict.pop(self.get_linkname('trajectory'))
except KeyError:
raise InputValidationError("trajectory is specified for this calculation")
try:
force_constants = inputdict.pop(self.get_linkname('force_constants'))
except KeyError:
raise InputValidationError("no force_constants is specified for this calculation")
try:
code = inputdict.pop(self.get_linkname('code'))
except KeyError:
raise InputValidationError("no code is specified for this calculation")
time_step = trajectory.get_times()[1]-trajectory.get_times()[0]
##############################
# END OF INITIAL INPUT CHECK #
##############################
# =================== prepare the python input files =====================
cell_txt = structure_to_poscar(structure)
input_txt = parameters_to_input_file(parameters_data)
force_constants_txt = get_FORCE_CONSTANTS_txt(force_constants)
trajectory_txt = get_trajectory_txt(trajectory)
# =========================== dump to file =============================
input_filename = tempfolder.get_abs_path(self._INPUT_FILE_NAME)
with open(input_filename, 'w') as infile:
infile.write(input_txt)
cell_filename = tempfolder.get_abs_path(self._INPUT_CELL)
with open(cell_filename, 'w') as infile:
infile.write(cell_txt)
force_constants_filename = tempfolder.get_abs_path(self._INPUT_FORCE_CONSTANTS)
with open(force_constants_filename, 'w') as infile:
infile.write(force_constants_txt)
trajectory_filename = tempfolder.get_abs_path(self._INPUT_TRAJECTORY)
with open(trajectory_filename, 'w') as infile:
infile.write(trajectory_txt)
# ============================ calcinfo ================================
local_copy_list = []
remote_copy_list = []
# additional_retrieve_list = settings_dict.pop("ADDITIONAL_RETRIEVE_LIST",[])
calcinfo = CalcInfo()
calcinfo.uuid = self.uuid
# Empty command line by default
calcinfo.local_copy_list = local_copy_list
calcinfo.remote_copy_list = remote_copy_list
# Retrieve files
calcinfo.retrieve_list = [self._OUTPUT_FILE_NAME,
self._OUTPUT_FORCE_CONSTANTS,
self._OUTPUT_QUASIPARTICLES]
codeinfo = CodeInfo()
codeinfo.cmdline_params = [self._INPUT_FILE_NAME, self._INPUT_TRAJECTORY,
'-ts', '{}'.format(time_step), '--silent',
'-sfc', self._OUTPUT_FORCE_CONSTANTS, '-thm', # '--resolution 0.01',
'-psm','2', '--normalize_dos', '-sdata']
if 'temperature' in parameters_data.get_dict():
codeinfo.cmdline_params.append('--temperature')
codeinfo.cmdline_params.append('{}'.format(parameters_data.dict.temperature))
if 'md_commensurate' in parameters_data.get_dict():
if parameters_data.dict.md_commensurate:
codeinfo.cmdline_params.append('--MD_commensurate')
codeinfo.stdout_name = self._OUTPUT_FILE_NAME
codeinfo.code_uuid = code.uuid
codeinfo.withmpi = False
calcinfo.codes_info = [codeinfo]
return calcinfo
| abelcarreras/aiida_extensions | plugins/jobs/dynaphopy.py | Python | mit | 10,991 |
# Generated by Django 2.2.9 on 2020-01-15 11:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0032_auto_20200115_1121'),
]
operations = [
migrations.AlterModelOptions(
name='resourcebase',
options={'permissions': (('change_resourcebase_permissions', 'Can change resource permissions'), ('download_resourcebase', 'Can download resource'), ('publish_resourcebase', 'Can publish resource'), ('change_resourcebase_metadata', 'Can change resource metadata'))},
),
]
| tomkralidis/geonode | geonode/base/migrations/0033_auto_20200115_1145.py | Python | gpl-3.0 | 583 |
from os.path import join
from jedi.file_io import FolderIO
from test.helpers import get_example_dir
def test_folder_io_walk():
root_dir = get_example_dir('namespace_package')
iterator = FolderIO(root_dir).walk()
root, folder_ios, file_ios = next(iterator)
assert {f.path for f in folder_ios} == {join(root_dir, 'ns1'), join(root_dir, 'ns2')}
for f in list(folder_ios):
if f.path.endswith('ns1'):
folder_ios.remove(f)
root, folder_ios, file_ios = next(iterator)
assert folder_ios
assert root.path == join(root_dir, 'ns2')
folder_ios.clear()
assert next(iterator, None) is None
def test_folder_io_walk2():
root_dir = get_example_dir('namespace_package')
iterator = FolderIO(root_dir).walk()
root, folder_ios, file_ios = next(iterator)
folder_ios.clear()
assert next(iterator, None) is None
| snakeleon/YouCompleteMe-x64 | third_party/ycmd/third_party/jedi_deps/jedi/test/test_file_io.py | Python | gpl-3.0 | 873 |
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import utils
from nova.virt import event as virtevent
from os_win import constants
from os_win import utilsfactory
from oslo_log import log as logging
import compute_hyperv.nova.conf
from compute_hyperv.nova import vmops
LOG = logging.getLogger(__name__)
CONF = compute_hyperv.nova.conf.CONF
class HyperVLifecycleEvent(virtevent.LifecycleEvent):
def __init__(self, uuid, name, transition, timestamp=None):
super(HyperVLifecycleEvent, self).__init__(uuid, transition, timestamp)
self.name = name
class InstanceEventHandler(object):
_TRANSITION_MAP = {
constants.HYPERV_VM_STATE_ENABLED: virtevent.EVENT_LIFECYCLE_STARTED,
constants.HYPERV_VM_STATE_DISABLED: virtevent.EVENT_LIFECYCLE_STOPPED,
constants.HYPERV_VM_STATE_PAUSED: virtevent.EVENT_LIFECYCLE_PAUSED,
constants.HYPERV_VM_STATE_SUSPENDED:
virtevent.EVENT_LIFECYCLE_SUSPENDED
}
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._listener = self._vmutils.get_vm_power_state_change_listener(
timeframe=CONF.hyperv.power_state_check_timeframe,
event_timeout=CONF.hyperv.power_state_event_polling_interval,
filtered_states=list(self._TRANSITION_MAP.keys()),
get_handler=True)
self._vmops = vmops.VMOps()
self._callbacks = []
def add_callback(self, callback):
self._callbacks.append(callback)
def start_listener(self):
utils.spawn_n(self._listener, self._handle_event)
def _handle_event(self, instance_name, instance_power_state):
# Instance uuid set by Nova. If this is missing, we assume that
# the instance was not created by Nova and ignore the event.
instance_uuid = self._vmops.get_instance_uuid(instance_name)
if instance_uuid:
self._emit_event(instance_name,
instance_uuid,
instance_power_state)
else:
LOG.debug("Instance uuid could not be retrieved for instance "
"%(instance_name)s. Instance state change event will "
"be ignored. Current power state: %(power_state)s.",
dict(instance_name=instance_name,
power_state=instance_power_state))
def _emit_event(self, instance_name, instance_uuid, instance_state):
virt_event = self._get_virt_event(instance_uuid,
instance_name,
instance_state)
for callback in self._callbacks:
utils.spawn_n(callback, virt_event)
def _get_virt_event(self, instance_uuid, instance_name, instance_state):
transition = self._TRANSITION_MAP[instance_state]
return HyperVLifecycleEvent(
uuid=instance_uuid,
name=instance_name,
transition=transition)
| stackforge/compute-hyperv | compute_hyperv/nova/eventhandler.py | Python | apache-2.0 | 3,590 |
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.distance.test_distance_steffensen.
This module contains unit tests for abydos.distance.Steffensen
"""
import unittest
from abydos.distance import Steffensen
class SteffensenTestCases(unittest.TestCase):
"""Test Steffensen functions.
abydos.distance.Steffensen
"""
cmp = Steffensen()
cmp_no_d = Steffensen(alphabet=0)
def test_steffensen_sim(self):
"""Test abydos.distance.Steffensen.sim."""
# Base cases
self.assertEqual(self.cmp.sim('', ''), 1.0)
self.assertEqual(self.cmp.sim('a', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'a'), 0.0)
self.assertEqual(self.cmp.sim('abc', ''), 0.0)
self.assertEqual(self.cmp.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp.sim('abcd', 'efgh'), 4.1196952743871653e-05)
self.assertAlmostEqual(self.cmp.sim('Nigel', 'Niall'), 0.2461588279)
self.assertAlmostEqual(self.cmp.sim('Niall', 'Nigel'), 0.2461588279)
self.assertAlmostEqual(self.cmp.sim('Colin', 'Coiln'), 0.2461588279)
self.assertAlmostEqual(self.cmp.sim('Coiln', 'Colin'), 0.2461588279)
self.assertAlmostEqual(
self.cmp.sim('ATCAACGAGT', 'AACGATTAG'), 0.439469213
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.sim('', ''), 1.0)
self.assertEqual(self.cmp_no_d.sim('a', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'a'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', ''), 0.0)
self.assertEqual(self.cmp_no_d.sim('', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.sim('abc', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.sim('abcd', 'efgh'), 1.0)
self.assertAlmostEqual(self.cmp_no_d.sim('Nigel', 'Niall'), 0.25)
self.assertAlmostEqual(self.cmp_no_d.sim('Niall', 'Nigel'), 0.25)
self.assertAlmostEqual(self.cmp_no_d.sim('Colin', 'Coiln'), 0.25)
self.assertAlmostEqual(self.cmp_no_d.sim('Coiln', 'Colin'), 0.25)
self.assertAlmostEqual(
self.cmp_no_d.sim('ATCAACGAGT', 'AACGATTAG'), 0.1090909091
)
def test_steffensen_dist(self):
"""Test abydos.distance.Steffensen.dist."""
# Base cases
self.assertEqual(self.cmp.dist('', ''), 0.0)
self.assertEqual(self.cmp.dist('a', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'a'), 1.0)
self.assertEqual(self.cmp.dist('abc', ''), 1.0)
self.assertEqual(self.cmp.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp.dist('abcd', 'efgh'), 0.9999588030472562)
self.assertAlmostEqual(self.cmp.dist('Nigel', 'Niall'), 0.7538411721)
self.assertAlmostEqual(self.cmp.dist('Niall', 'Nigel'), 0.7538411721)
self.assertAlmostEqual(self.cmp.dist('Colin', 'Coiln'), 0.7538411721)
self.assertAlmostEqual(self.cmp.dist('Coiln', 'Colin'), 0.7538411721)
self.assertAlmostEqual(
self.cmp.dist('ATCAACGAGT', 'AACGATTAG'), 0.560530787
)
# Tests with alphabet=0 (no d factor)
self.assertEqual(self.cmp_no_d.dist('', ''), 0.0)
self.assertEqual(self.cmp_no_d.dist('a', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'a'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', ''), 1.0)
self.assertEqual(self.cmp_no_d.dist('', 'abc'), 1.0)
self.assertEqual(self.cmp_no_d.dist('abc', 'abc'), 0.0)
self.assertEqual(self.cmp_no_d.dist('abcd', 'efgh'), 0.0)
self.assertAlmostEqual(self.cmp_no_d.dist('Nigel', 'Niall'), 0.75)
self.assertAlmostEqual(self.cmp_no_d.dist('Niall', 'Nigel'), 0.75)
self.assertAlmostEqual(self.cmp_no_d.dist('Colin', 'Coiln'), 0.75)
self.assertAlmostEqual(self.cmp_no_d.dist('Coiln', 'Colin'), 0.75)
self.assertAlmostEqual(
self.cmp_no_d.dist('ATCAACGAGT', 'AACGATTAG'), 0.8909090909
)
if __name__ == '__main__':
unittest.main()
| chrislit/abydos | tests/distance/test_distance_steffensen.py | Python | gpl-3.0 | 4,741 |
# Used by the ErrorHandlerResolutionTests test case.
from .views import empty_view
urlpatterns = []
handler400 = empty_view
handler404 = empty_view
handler500 = empty_view
| DONIKAN/django | tests/urlpatterns_reverse/urls_error_handlers_callables.py | Python | bsd-3-clause | 175 |
#
# This file is part of ROSbots Setup Tools.
#
# Copyright
#
# Copyright (C) 2017 Jack Pien <[email protected]>
#
# License
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details at
# <http://www.gnu.org/licenses/lgpl-3.0-standalone.html>
#
# Documentation
#
# http://www.rosbots.com
#
import os
import datetime as dt
import random
import time
from fabric.api import *
import fabric.contrib.files as fabfiles
from fabric.utils import fastprint
#env.hosts = ["localhost"]
env.user = 'pi'
env.shell = '/bin/bash -l -c'
is_debug = False
def _get_input(msg, force_need_query=False):
global is_debug
if is_debug or force_need_query:
val = raw_input(msg + "\n")
return val
else:
return ""
def _fp(msg):
fastprint(msg + "\n")
def _pp(msg):
"""
Print then pause
"""
global is_debug
_fp(msg)
if is_debug:
programPause = _get_input("Press the <ENTER> key to continue...")
WS_DIR = "/ros_catkin_ws"
INSTALL_DIR = WS_DIR + "/build/opt/ros/kinetic"
def main_setup_only_rosbots_components():
step_7_setup_ros_rosbots_packages()
step_8_setup_mcu_uno_support()
step_9_setup_mcu_uno_support_part_2()
def main_setup_ros_opencv_for_rosbots():
step_1_setup_ros_for_pi()
step_2_setup_ros_robot_packages()
#step_3_setup_ros_rosbots_packages()
step_4_setup_opencv_for_pi()
step_5_setup_ros_robot_image_common_package()
step_6_setup_ros_robot_vision_packages()
step_7_setup_ros_rosbots_packages()
step_8_setup_mcu_uno_support()
step_9_setup_mcu_uno_support_part_2()
def main_setup_ros_opencv():
step_1_setup_ros_for_pi()
step_2_setup_ros_robot_packages()
step_4_setup_opencv_for_pi()
step_5_setup_ros_robot_image_common_package()
step_6_setup_ros_robot_vision_packages()
def helloworld():
run("ls -la")
#with cd("~"):
# home_path = run("pwd")
# ws_dir = home_path + WS_DIR
# put("./rosbots_service_template.bash", "~/rosbots_template")
# run("cat rosbots_template | sed 's/_TEMPLATE_HOME/" + home_path.replace("/", "\/") + "/' | sed 's/_TEMPLATE_WS_PATH/" + ws_dir.replace("/", "\/") + "/' > rosbots")
def how_to_test_rosbots_python_scripts():
_fp("Say you wrote a rosbots python script called foo.py. (1) chmod +x foo.py. (2) scp it over to the /home/pi/ros_catkin_ws/build/opt/ros/kinetic/share/rosbots_driver. (3) from remote machine 'rosrun rosbots_driver foo.py'")
def push_test_ros_script(path_fn=None):
if path_fn == None:
_fp("\nERROR\nPlease specify local ROS script name")
_fp("$ fab push_test_ros_script:<script>")
return
fn = path_fn.split("/")[-1]
remote_path = "/home/pi/ros_catkin_ws/build/opt/ros/kinetic/share"
ros_pkg_name = "rosbots_driver"
_fp("Pushing " + path_fn + " to remote location: " +
remote_path + "/" + ros_pkg_name)
put(path_fn, remote_path + "/" + ros_pkg_name)
run("chmod +x " + remote_path + "/" + ros_pkg_name + "/" + fn)
#open_shell("rosrun " + ros_pkg_name + " " + fn)
run("sudo su -c 'source /home/pi/ros_catkin_ws/build/opt/ros/kinetic/setup.bash && export PYTHONPATH=/home/pi/lib/python:${PYTHONPATH} && rosrun " + ros_pkg_name + " " + fn + "'")
def push_test_rosbots_motor_driver_script():
run("echo 'Starting...'")
home_path = run("pwd")
rosbots_startup_fn = "rosbots_startup.sh"
local_md_dir = "../../ros_ws/src/rosbots_driver/scripts/rosbots_driver"
remote_md_dir = "/home/pi/ros_catkin_ws/build/opt/ros/kinetic/lib/rosbots_driver"
md_fn = "motor_driver.py"
rosnode_name = "/motor_driver"
# Kill current motor_driver node
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
if run("rosnode list | grep -i " + rosnode_name, warn_only=True).succeeded:
_fp("Killing current " + rosnode_name + " rosnode")
run("rosnode kill `rosnode list | grep -i " + rosnode_name + "`")
#_fp(actual_name)
#run("rosnode kill " + rosnode_name)
env.shell = old_shell
# Push new startup script
if False:
put("./rosbots_startup.sh", "~/rosbots_startup.sh")
run("chmod +x ~/rosbots_startup.sh")
# Push the new motor driver file
if fabfiles.exists(remote_md_dir + "/" + md_fn) == False:
_fp("No remote " + md_fn + " found!!! Quitting")
return
else:
put(local_md_dir + "/" + md_fn, remote_md_dir + "/" + md_fn)
run("rm " + remote_md_dir + "/" + md_fn + "c", warn_only=True)
# Start the rosbots startup script
sudo("export ROSBOTS_HOME=/home/pi; export ROSBOTS_WS_PATH=/home/pi/ros_catkin_ws; " + home_path + "/" + rosbots_startup_fn)
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
_fp("List of running ros nodes")
run("rosnode list")
env.shell = old_shell
def setup_wifi_on_pi():
supplicant_fn = "/etc/wpa_supplicant/wpa_supplicant.conf"
run("echo 'Starting...'")
#if run("grep 'country=GB' " + supplicant_fn, warn_only=True).succeeded:
# pass
#else:
# _fp("")
# _pp("You should probably set 'country=US' in your supplicant file " + \
# supplicant_fn + " when you get a chance...")
wifi_reg_domain = _get_input("What is your country's wifi regulatory domain (ISO 3166 alpha2 country code, ie 'US')?", force_need_query=True)
_fp(wifi_reg_domain)
ssid_name = _get_input("What is the SSID?", force_need_query=True)
_fp(ssid_name)
if sudo("grep 'ssid=\"" + ssid_name + "\"' " + supplicant_fn, \
warn_only=True).succeeded:
_fp("This SSID is already set up")
else:
wpa_pwd = _get_input("What is the WPA pwd?", force_need_query=True)
_fp(wpa_pwd)
name = _get_input("What do you want to name this network?", force_need_query=True)
_fp(name)
_fp("Adding the network you specified into " + supplicant_fn)
network_config = "country=" + wifi_reg_domain + "\n" + \
"\n\n" + \
"network={\n" + \
" ssid=\"" + ssid_name + "\"\n" + \
" psk=\"" + wpa_pwd + "\"\n" + \
" id_str=\"" + name + "\"\n" + \
"}\n"
sudo("cp " + supplicant_fn + " " + supplicant_fn + ".old")
sudo("echo '" + network_config + "' >> " + supplicant_fn)
_fp("To get IP address of Pi, from a linux system - 'arp -a'")
def step_8_setup_mcu_uno_support():
_pp("Plug in the UNO board to the RPi's USB port")
home_path = run("pwd")
git_path = home_path + "/gitspace"
rosbots_path = git_path + "/rosbots_driver"
pio_path = rosbots_path + "/platformio/rosbots_firmware"
rosserial_path = git_path + "/rosserial"
ws_dir = home_path + "/rosbots_catkin_ws"
install_dir = home_path + INSTALL_DIR
main_ros_ws_dir = home_path + WS_DIR
# Just download, we'll build it isolated later
#_setup_ros_other_packages("actionlib_msgs", run_rosdep=False)
_setup_ros_other_packages("nav_msgs", run_rosdep=False)
# Need nav_msgs compiled
with cd(main_ros_ws_dir):
#run("./src/catkin/bin/catkin_make_isolated --pkg rosbots_driver --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
#run(main_ros_ws_dir + "/src/catkin/bin/catkin_make -j1 --pkg nav_msgs")
#run(main_ros_ws_dir + "/src/catkin/bin/catkin_make install -j1 --pkg nav_msgs")
#run("./src/catkin/bin/catkin_make_isolated --pkg actionlib_msgs --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
run("./src/catkin/bin/catkin_make_isolated --pkg nav_msgs --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
env.shell = old_shell
# Old pip causes incompleteread importerror
sudo("easy_install --upgrade pip")
# So we can access USB serial port
sudo("usermod -a -G dialout pi")
# Some requirements
sudo("pip install -U testresources")
sudo("pip install -U platformio")
sudo("pip install -U backports.functools_lru_cache")
_fp("=============")
_pp("If this is the first time running setup, the next step will most likely fail since you need a reboot to enable the UNO drivers. If it fails, reboot and run this step again.")
_fp("=============\n")
def step_9_setup_mcu_uno_support_part_2():
_pp("Plug in the UNO board to the RPi's USB port")
home_path = run("pwd")
git_path = home_path + "/gitspace"
rosbots_path = git_path + "/rosbots_driver"
pio_path = rosbots_path + "/platformio/rosbots_firmware"
rosserial_path = git_path + "/rosserial"
ws_dir = home_path + "/rosbots_catkin_ws"
install_dir = home_path + INSTALL_DIR
main_ros_ws_dir = home_path + WS_DIR
with cd(pio_path):
run("platformio run -e uno -t upload")
# We need diagnostic_msgs, but just download, we'll compile
# it on our own
_setup_ros_other_packages("diagnostic_msgs", run_rosdep=False)
# Download and install rosserial
if not fabfiles.exists(rosserial_path):
with cd(git_path):
run("git clone https://github.com/ros-drivers/rosserial.git")
_fp("Creating symbolic link to main ros workspace")
with cd(ws_dir + "/src"):
if fabfiles.exists("rosserial"):
run("rm rosserial")
run("ln -s " + rosserial_path)
else:
_fp("Found rosserial repo, just fetching top and rebasing")
with cd(rosserial_path):
run("git fetch origin")
run("git rebase origin/jade-devel")
with cd(ws_dir):
#run("./src/catkin/bin/catkin_make_isolated --pkg rosbots_driver --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make -j1")
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make install -j1")
env.shell = old_shell
# Need diagnostic_msgs which rosserial_python needs
# catkin_make_isolated --pkg diagnostic_msgs --install -DCMAKE_BUILD_TYPE=Release --install-space /home/pi/ros_catkin_ws/build/opt/ros/kinetic
subpackage = "diagnostic_msgs"
with cd(main_ros_ws_dir):
run("./src/catkin/bin/catkin_make_isolated --pkg " + subpackage + " --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j1")
#Update pip if necessary
sudo("easy_install --upgrade pip")
# Rerun the init script
sudo("systemctl stop rosbots")
sudo("systemctl start rosbots")
def step_5_setup_ros_robot_image_common_package():
home_path = run("pwd")
install_dir = home_path + INSTALL_DIR
main_ros_ws_dir = home_path + WS_DIR
_pp("Usually done after you set up OpenCV and the other robot and rosbot packages. This mainly sets up image_transport.")
_setup_ros_other_packages("image_common")
def step_2_setup_ros_robot_packages():
_pp("After you successfully install ros_com stuff, install some others. This installs geometry_msgs needed for Twist among other types of basic telemetry messages.")
_setup_ros_other_packages("geometry_msgs")
_setup_ros_other_packages("teleop_twist_keyboard")
def _setup_ros_packages_from_git(ros_package_name, git_url, subpackage_list):
run("echo 'Starting...'")
home_path = run("pwd")
git_path = home_path + "/gitspace"
ros_package_path = git_path + "/" + ros_package_name #"/rosbots"
ws_dir = home_path + WS_DIR
install_dir = home_path + INSTALL_DIR
_fp("Do we need to create gitspace folder?")
if not fabfiles.exists(git_path):
run("mkdir " + git_path)
_fp("Do we need to git clone the repo?")
if not fabfiles.exists(ros_package_path):
_fp("Did not find " + ros_package_name + " repo, cloning...")
with cd(git_path):
run("git clone " + git_url)
_fp("Creating symbolic link to main ros workspace")
with cd(ws_dir + "/src"):
if fabfiles.exists(ros_package_name):
run("rm " + ros_package_name)
run("ln -s " + ros_package_path)
else:
#_fp("Found the repo, just fetching top and rebasing")
#with cd(ros_package_path):
# run("git fetch origin")
# run("git rebase origin/master")
_pp("Found the repo, not doing anything - feel free to git fetch and rebase manually")
for subpackage in subpackage_list:
_fp("Compiling " + subpackage + "...")
with cd(ws_dir):
run("./src/catkin/bin/catkin_make_isolated --pkg " + subpackage + " --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j1")
def step_6_setup_ros_robot_vision_packages():
_fp("Usually done after you set up OpenCV and the other robot and rosbot packages")
_pp("This sets up mainly cv_bridge so we can pass CV image messages around. Setting up from github instead of rosinstall because rosinstall will pull in OpenCV automatically and you should have already built it from source.")
_setup_ros_packages_from_git("vision_opencv", \
"https://github.com/ros-perception/vision_opencv.git", \
["cv_bridge", "image_geometry", "vision_opencv"])
def step_7_setup_ros_rosbots_packages():
run("echo 'Starting...'")
home_path = run("pwd")
git_path = home_path + "/gitspace"
rosbots_path = git_path + "/rosbots_driver"
ws_dir = home_path + "/rosbots_catkin_ws" # home_path + WS_DIR
install_dir = home_path + INSTALL_DIR
main_ros_ws_dir = home_path + WS_DIR
# Just download tf and geometry2, which includes tf2.
# We'll compile it ourselves later
_setup_ros_other_packages("geometry", run_rosdep=False)
_setup_ros_other_packages("geometry2", run_rosdep=False)
# Need tf and tf2 compiled
with cd(main_ros_ws_dir):
#run("./src/catkin/bin/catkin_make_isolated --pkg rosbots_driver --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
package_list = [
"angles", "actionlib_msgs", "actionlib", "tf2_msgs", "tf2", "tf2_py", "tf2_ros", "tf"]
for pkg in package_list:
run("./src/catkin/bin/catkin_make_isolated --pkg " + pkg + " --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
env.shell = old_shell
sudo("apt-get install -y python-pip")
sudo("pip install picamera")
# Create a separate rosbots_catkin_ws outside of core ROS
if not fabfiles.exists(ws_dir):
_fp("Need to create and init rosbots catkin workspace")
run("mkdir -p " + ws_dir + "/src")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
with cd(ws_dir + "/src"):
run(main_ros_ws_dir + "/src/catkin/bin/catkin_init_workspace")
with cd(ws_dir):
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make")
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make install")
env.shell = old_shell
src_cmd = "source " + ws_dir + "/devel/setup.bash"
if run("grep '" + src_cmd + "' ~/.bashrc", warn_only=True).succeeded:
_fp("Sourcing of ROSbots catkin ws env setup.bash is already in your bashrc")
else:
_pp("Going to add ROSbots catkin ws source setup into your bashrc")
run("echo '" + src_cmd + "\n' >> ~/.bashrc")
if not fabfiles.exists(git_path):
_fp("Did not find rosbots repo, cloning...")
run("mkdir " + git_path)
if not fabfiles.exists(rosbots_path):
with cd(git_path):
run("git clone https://github.com/ROSbots/rosbots_driver.git")
_fp("Creating symbolic link to main ros workspace")
with cd(ws_dir + "/src"):
if fabfiles.exists("rosbots_driver"):
run("rm rosbots_driver")
run("ln -s " + rosbots_path)
else:
_fp("Found rosbots repo, just fetching top and rebasing")
with cd(rosbots_path):
run("git fetch origin")
run("git rebase origin/master")
with cd(ws_dir):
#run("./src/catkin/bin/catkin_make_isolated --pkg rosbots_driver --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make")
run(main_ros_ws_dir + "/src/catkin/bin/catkin_make install")
env.shell = old_shell
# Installing RPIO DMA PWM library
with cd(git_path):
# Don't install RPIO library. May be causing non-deterministic
# kernel panic when used.
#if not fabfiles.exists("RPIO"):
if False:
_pp("Did not find RPIO library so downloading and setting up")
# Old library does not support RPi 3
#run("git clone https://github.com/metachris/RPIO.git --branch v2 --single-branch")
#run("git clone https://github.com/limuxy/RPIO.git")
run("git clone https://github.com/ROSbots/RPIO.git --branch v2_branch --single-branch")
with cd("RPIO"):
run("python setup.py build")
_pp("Did build complete for RPIO?")
run("mkdir -p " + home_path + "/lib/python")
run("export PYTHONPATH=" + home_path + "/lib/python; python setup.py -v install --home " + home_path)
_pp("Did RPIO install correctly into " + home_path + "?")
# Update with newest bashrc for rosbots
put("./sourceme_rosbots.bash", "~/")
# Rerun the init script
sudo("systemctl stop rosbots")
sudo("systemctl start rosbots")
def _setup_ros_other_packages(rospkg, run_rosdep=True):
run("echo 'Starting...'")
home_path = run("pwd")
ws_dir = home_path + WS_DIR
if not fabfiles.exists(ws_dir):
_fp("ROS Workspace not found - run the main set up first")
return
with cd(ws_dir):
ts = str(time.time()).split(".")[0]
fn = "kinetic-custom_" + str(ts) + "_ros.rosinstall"
run("rosinstall_generator " + rospkg + " --rosdistro kinetic --deps --wet-only --tar > " + fn)
run("cat " + fn)
_pp("Did rosinstall generator create the install file correctly? If so, we're going to merge and update the workspace. (If there are duplicate packages, hit DELETE and REPLACE!)")
run("wstool merge -y -t src " + fn)
_pp("Did the wstool merge correctly? If so, we are going to update on the install file for the workspace.")
run("wstool update --delete-changed-uris -t src")
_pp("Did the wstool update correctly? If so, we are going to update dependencies.")
if run_rosdep:
run("rosdep install --from-paths src --ignore-src --rosdistro kinetic -y -r --os=debian:jessie")
_pp("Did the dependencies update ok? If so, let's compile the new packages.")
run("./src/catkin/bin/catkin_make_isolated --install -DCMAKE_BUILD_TYPE=Release --install-space " + home_path + INSTALL_DIR + " -j1")
def step_4_setup_opencv_for_pi():
"""
To build this in a Docker container:
run:
docker run -it --name rosbots_build rosbots-raspbian:lite /bin/bash
apt-get update; apt-get -y upgrade
apt-get install -y libgdk-pixbuf2.0-dev libpango1.0-dev libcairo2-dev
apt-get install -y libgtk2.0-dev
apt-get install -y build-essential cmake pkg-config libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libatlas-base-dev gfortran python2.7-dev python3-dev libavutil-dev python-pip git
pip install numpy
mkdir -p /home/pi/gitspace
cd /home/pi/gitspace
git clone https://github.com/opencv/opencv.git
cd opencv
git checkout -b 3.4.6_branch tags/3.4.6
cd ../
git clone https://github.com/opencv/opencv_contrib.git
cd opencv_contrib
git checkout -b 3.4.6_branch tags/3.4.6
cd ../opencv
mkdir build
cd build
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local -D INSTALL_PYTHON_EXAMPLES=ON -D OPENCV_ENABLE_NONFREE=ON -D OPENCV_EXTRA_MODULES_PATH=/home/pi/gitspace/opencv_contrib/modules -D BUILD_EXAMPLES=ON ..
make -j4
On physcial RPi:
cd /home/pi/gitspace
git clone https://github.com/opencv/opencv.git
cd opencv
git checkout -b 3.4.6_branch tags/3.4.6
cd ../
git clone https://github.com/opencv/opencv_contrib.git
cd opencv_contrib
git checkout -b 3.4.6_branch tags/3.4.6
copy /home/pi/gitspace/opencv/build to /home/pi/gitspace/opencv
sudo apt-get update; sudo apt-get -y upgrade
sudo apt-get install -y libgdk-pixbuf2.0-dev libpango1.0-dev libcairo2-dev
sudo apt-get install -y libgtk2.0-dev
sudo apt-get install -y build-essential cmake pkg-config libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libatlas-base-dev gfortran python2.7-dev python3-dev libavutil-dev python-pip git
then cd /home/pi/gitspace/opencv/build,
'sudo make install/fast', 'sudo ldconfig'
"""
_pp("Roughly following http://www.pyimagesearch.com/2016/04/18/install-guide-raspberry-pi-3-raspbian-jessie-opencv-3/")
#_fp("Update system first")
#sudo("apt-get update")
#sudo("apt-get -y upgrade")
_fp("Installing dependencies for OpenCV")
# Need to install libgtk2.0 first in Stretch?!?
sudo("apt-get install -y libgtk2.0-dev")
sudo("apt-get install -y build-essential cmake pkg-config libjpeg-dev libtiff5-dev libjasper-dev libpng12-dev libavcodec-dev libavformat-dev libswscale-dev libv4l-dev libxvidcore-dev libx264-dev libatlas-base-dev gfortran python2.7-dev python3-dev")
# Needed for web_video_server and perhaps help with OpenCV support as well
sudo("apt-get install -y libavutil-dev")
sudo("apt-get install -y python-pip")
sudo("sudo pip install numpy")
sudo("sudo pip install --upgrade numpy")
home_path = run("pwd")
git_path = home_path + "/gitspace"
_fp("Do we need to create gitspace folder?")
if not fabfiles.exists(git_path):
run("mkdir " + git_path)
_fp("Git cloning OpenCV if need be")
if not fabfiles.exists(git_path + "/opencv"):
with cd(git_path):
run("git clone https://github.com/opencv/opencv.git")
with cd(git_path + "/opencv"):
run("git tag -l")
#_pp("We are compiling 3.4.1 - make sure this is the latest from the tag list printed above")
#run("git checkout -b 3.4.1_branch tags/3.4.1")
_pp("We are compiling 3.4.6 - make sure this is the latest from the tag list printed above")
run("git checkout -b 3.4.6_branch tags/3.4.6")
opencv_contrib_path = git_path + "/opencv_contrib"
if not fabfiles.exists(opencv_contrib_path):
with cd(git_path):
run("git clone https://github.com/opencv/opencv_contrib.git")
with cd(opencv_contrib_path):
run("git tag -l")
#_pp("We are compiling 3.4.1 - make sure this is the latest from the tag list printed above")
#run("git checkout -b 3.4.1_branch tags/3.4.1")
_pp("We are compiling 3.4.6 - make sure this is the latest from the tag list printed above")
run("git checkout -b 3.4.6_branch tags/3.4.6")
_fp("Setting up OpenCV cmake if need be")
if not fabfiles.exists(git_path + "/opencv/build"):
with cd(git_path + "/opencv"):
run("mkdir build")
# Set up compile
with cd(git_path + "/opencv/build"):
run("cmake -D CMAKE_BUILD_TYPE=RELEASE " + \
"-D CMAKE_INSTALL_PREFIX=/usr/local " + \
"-D INSTALL_PYTHON_EXAMPLES=ON -D OPENCV_ENABLE_NONFREE=ON " + \
"-D OPENCV_EXTRA_MODULES_PATH=" + \
opencv_contrib_path + "/modules " + \
"-D BUILD_EXAMPLES=ON ..")
# Compile
_fp("Compiling OpenCV...")
with cd(git_path + "/opencv/build"):
run("make -j1")
sudo("make install")
sudo("ldconfig")
def step_x_setup_ros_for_ubuntu_mate_pi():
run("echo 'Roughly following http://wiki.ros.org/kinetic/Installation/Ubuntu'")
_pp("* If you need to do raspi-config stuff, CTRL-C out and do that before running this script")
# Setup ROS Repositories
if not fabfiles.exists("/etc/apt/sources.list.d/ros-latest.list"):
sudo("apt-get update")
sudo("sh -c 'echo \"deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main\" > /etc/apt/sources.list.d/ros-latest.list'")
sudo("apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116") #apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 0xB01FA116")
sudo("apt-get update")
sudo("apt-get -y upgrade")
else:
_fp("ros-lastest.list already exists... skipping set up")
sudo("apt-get update")
sudo("apt-get -y upgrade")
sudo("apt-get install -y ros-kinetic-ros-base")
def step_1_setup_ros_for_pi():
"""
To compile ros2 on in a Docker Raspbian container:
docker run -it --name rosbots_ros2_build rosbots-raspbian:lite /bin/bash
update-locale LC_ALL=en_GB.UTF-8 LANG=en_GB.UTF-8
export LANG=en_GB.UTF-8
export LC_ALL=en_GB.UTF-8
apt update && apt install -y \
build-essential \
cmake \
git \
python3-pip \
python-rosdep \
libxml2-dev \
libxslt1-dev \
wget
apt install -y virtualenvwrapper
source /usr/share/virtualenvwrapper/virtualenvwrapper.sh
mkvirtualenv py_3 --python=/usr/bin/python3
pip install -U argcomplete catkin_pkg colcon-common-extensions coverage empy flake8 flake8-blind-except flake8-builtins flake8-class-newline flake8-comprehensions flake8-deprecated flake8-docstrings flake8-import-order flake8-quotes lark-parser mock nose pep8 pydocstyle pyparsing setuptools vcstool \
pytest-repeat \
pytest-rerunfailures \
pytest \
pytest-cov \
pytest-runner \
lxml \
rosdep
apt-get install --no-install-recommends -y \
libasio-dev \
libtinyxml2-dev
mkdir -p /home/pi/ros2_ws/src
cd /home/pi/ros2_ws
wget https://raw.githubusercontent.com/ros2/ros2/release-latest/ros2.repos
vcs import src < ros2.repos
(sudo) rosdep init
rosdep update
rosdep install --from-paths src --ignore-src --rosdistro crystal -y -r --os=debian:stretch
pip install -U lark-parser
colcon build --symlink-install --packages-skip ros1_bridge --packages-ignore qt_gui_cpp rqt_gui_cpp
On the physical RPi, do all steps above except the colcon build step
then, docker cp /home/pi/ros2_ws/install ./build ./log to the physical RPi /home/pi/ros2_ws
Install python3.6
Change these scripts to use the python3 in the correct virtualenv directory
install/ros2cli/bin/_ros2_daemon:#!/root/.virtualenvs/py_3/bin/python3
install/ros2cli/bin/ros2:#!/root/.virtualenvs/py_3/bin/python3
. ~/ros2_ws/install/local_setup.bash (or setup.bash)
ros2 run demo_nodes_cpp talker
ros2 run demo_nodes_py listener
"""
global WS_DIR
global INSTALL_DIR
run("echo 'Roughly following http://wiki.ros.org/ROSberryPi/Installing%20ROS%20Kinetic%20on%20the%20Raspberry%20Pi'")
_fp("Set up / compile ROS on Rasbian Jessie Lite 2016-05-27")
_pp("* If you need to do raspi-config stuff, CTRL-C out and do that before running this script")
# Setup ROS Repositories
if not fabfiles.exists("/etc/apt/sources.list.d/ros-latest.list"):
# Raspbian Stretch does not have dirmngr installed by default. This
# is needed for apt-key
sudo("apt-get update")
sudo("apt-get -y install dirmngr")
sudo("sh -c 'echo \"deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main\" > /etc/apt/sources.list.d/ros-latest.list'")
sudo("sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116") #apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 0xB01FA116")
sudo("apt-get update")
sudo("apt-get -y upgrade")
else:
_fp("ros-lastest.list already exists... skipping set up")
sudo("apt-get update")
sudo("apt-get -y upgrade")
# Install Bootstrap Dependencies
sudo("apt-get install -y python-rosdep python-rosinstall-generator python-wstool python-rosinstall build-essential cmake")
# Initializing rosdep
if not fabfiles.exists("/etc/ros/rosdep/sources.list.d/20-default.list"):
sudo("rosdep init")
run("rosdep update")
home_path = run("pwd")
ws_dir = home_path + WS_DIR
# Create catkin workspace
if not fabfiles.exists(ws_dir):
run("mkdir -p " + ws_dir)
# Compile
with cd(ws_dir):
if not fabfiles.exists("kinetic-ros_comm-wet.rosinstall"):
run("rosinstall_generator ros_comm --rosdistro kinetic --deps --wet-only --tar > kinetic-ros_comm-wet.rosinstall")
if not fabfiles.exists("src"):
_fp("The following wstool downloads the source code needed")
_pp("If wstool init fails or is interrupted, you can resume the download by running:\n wstool update -j 2 -t src\n BTW, the -j 2 option downloads 2 packages in parallel")
run("wstool init -j 2 src kinetic-ros_comm-wet.rosinstall")
else:
_pp("Looks like you had already tried 'wstool init...', so continuing with 'wstool update...'")
run("wstool update --delete-changed-uris -j 2 -t src")
rval = _get_input("Did wstool download everything ok?\n(NO to quit & resolve, ENTER to continue)")
if rval == "NO":
return
# Resolve dependencies
run("rosdep install -y --from-paths src --ignore-src --rosdistro kinetic -r --os=debian:jessie")
install_dir = home_path + INSTALL_DIR
_fp("All dependencies have been resolved, going to start compiling and install into: " + install_dir)
if not fabfiles.exists(install_dir):
run("mkdir -p " + install_dir)
rval = _get_input("Continue with compile or skip? SKIP to skip compile, ENTER to continue...")
if rval != "SKIP":
run("./src/catkin/bin/catkin_make_isolated --install -DCMAKE_BUILD_TYPE=Release --install-space " + install_dir + " -j2")
        rval = _get_input("Did the compile succeed?\n(NO to quit and fix, ENTER to continue)")
if rval == "NO":
return
src_cmd = "source " + install_dir + "/setup.bash"
if run("grep '" + src_cmd + "' ~/.bashrc", warn_only=True).succeeded:
_fp("Sourcing of ROS env setup is already in your bashrc")
else:
_pp("Going to add ROS source setup into your bashrc")
run("echo '" + src_cmd + "\n' >> ~/.bashrc")
run("echo 'export ROSBOTS_MASTER=1\n' >> ~/.bashrc")
# Add some custom python library paths
run("echo 'export PYTHONPATH=/home/pi/lib/python:${PYTHONPATH}\n' >> ~/.bashrc")
# Add other setups for rosbots
put("./sourceme_rosbots.bash", "~/")
run("echo 'source ~/sourceme_rosbots.bash' >> ~/.bashrc")
# Create a separate rosbots_catkin_ws outside of core ROS
rosbots_ws_dir = home_path + "/rosbots_catkin_ws"
if not fabfiles.exists(rosbots_ws_dir):
_fp("Need to create and init rosbots catkin workspace")
run("mkdir -p " + rosbots_ws_dir + "/src")
old_shell = env.shell
env.shell = '/bin/bash -l -c -i'
with cd(rosbots_ws_dir + "/src"):
run(ws_dir + "/src/catkin/bin/catkin_init_workspace")
with cd(rosbots_ws_dir):
run(ws_dir + "/src/catkin/bin/catkin_make")
run(ws_dir + "/src/catkin/bin/catkin_make install")
env.shell = old_shell
src_cmd = "source " + rosbots_ws_dir + "/devel/setup.bash"
if run("grep '" + src_cmd + "' ~/.bashrc", warn_only=True).succeeded:
_fp("Sourcing of ROSbots catkin ws env setup.bash is already in your bashrc")
else:
_pp("Going to add ROSbots catkin ws source setup into your bashrc")
run("echo '" + src_cmd + "\n' >> ~/.bashrc")
_pp("All ROS components should be compiled and installed. Going to set up init.d to run ROSBots as a service.")
# Copy over the rosbots init script - which is kicked off by the init.d
# service framework
put("./rosbots_startup.sh", "~/rosbots_startup.sh")
run("chmod +x ~/rosbots_startup.sh")
put("./rosbots_shutdown.sh", "~/rosbots_shutdown.sh")
run("chmod +x ~/rosbots_shutdown.sh")
# Set up and install the init.d service which will fork and call
# the rosbots startup script above
put("./rosbots_service_template.bash", "~/rosbots_template")
run("cat rosbots_template | sed 's/_TEMPLATE_HOME/" + home_path.replace("/", "\/") + "/' | sed 's/_TEMPLATE_WS_PATH/" + ws_dir.replace("/", "\/") + "/' > rosbots")
run("rm rosbots_template")
sudo("mv rosbots /etc/init.d/")
sudo("chown root:root /etc/init.d/rosbots")
sudo("chmod 755 /etc/init.d/rosbots")
sudo("update-rc.d rosbots defaults")
sudo("systemctl daemon-reload")
sudo("systemctl stop rosbots")
sudo("systemctl start rosbots")
_fp("To get IP address of Pi, from a linux system - 'arp -a'")
_fp("Done...")
| ROSbots/rosbots_setup_tools | rpi_setup/fabfile.py | Python | gpl-3.0 | 34,336 |
import wave
import random
import tarfile
import threading
import numpy as np
import tensorflow as tf
import unicodedata
import codecs
from os import path
from os import rmdir
from os import remove
from glob import glob
from math import ceil
from Queue import Queue
from os import makedirs
from sox import Transformer
from itertools import cycle
from os.path import getsize
from threading import Thread
from Queue import PriorityQueue
from util.stm import parse_stm_file
from util.gpu import get_available_gpus
from util.text import text_to_char_array, ctc_label_dense_to_sparse
from tensorflow.python.platform import gfile
from util.audio import audiofile_to_input_vector
from tensorflow.contrib.learn.python.learn.datasets import base
class DataSets(object):
def __init__(self, train, dev, test):
self._dev = dev
self._test = test
self._train = train
def start_queue_threads(self, session):
self._dev.start_queue_threads(session)
self._test.start_queue_threads(session)
self._train.start_queue_threads(session)
@property
def train(self):
return self._train
@property
def dev(self):
return self._dev
@property
def test(self):
return self._test
class DataSet(object):
def __init__(self, txt_files, thread_count, batch_size, numcep, numcontext):
self._numcep = numcep
self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
self._x_length = tf.placeholder(tf.int32, [])
self._y = tf.placeholder(tf.int32, [None,])
self._y_length = tf.placeholder(tf.int32, [])
self._example_queue = tf.PaddingFIFOQueue(shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None,], []],
dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
capacity=2 * self._get_device_count() * batch_size)
self._enqueue_op = self._example_queue.enqueue([self._x, self._x_length, self._y, self._y_length])
self._txt_files = txt_files
self._batch_size = batch_size
self._numcontext = numcontext
self._thread_count = thread_count
self._files_circular_list = self._create_files_circular_list()
def _get_device_count(self):
available_gpus = get_available_gpus()
return max(len(available_gpus), 1)
def start_queue_threads(self, session):
batch_threads = [Thread(target=self._populate_batch_queue, args=(session,)) for i in xrange(self._thread_count)]
for batch_thread in batch_threads:
batch_thread.daemon = True
batch_thread.start()
def _create_files_circular_list(self):
priorityQueue = PriorityQueue()
for txt_file in self._txt_files:
stm_dir = path.sep + "stm" + path.sep
wav_dir = path.sep + "wav" + path.sep
wav_file = path.splitext(txt_file.replace(stm_dir, wav_dir))[0] + ".wav"
wav_file_size = getsize(wav_file)
priorityQueue.put((wav_file_size, (txt_file, wav_file)))
files_list = []
while not priorityQueue.empty():
priority, (txt_file, wav_file) = priorityQueue.get()
files_list.append((txt_file, wav_file))
return cycle(files_list)
def _populate_batch_queue(self, session):
for txt_file, wav_file in self._files_circular_list:
source = audiofile_to_input_vector(wav_file, self._numcep, self._numcontext)
source_len = len(source)
with codecs.open(txt_file, encoding="utf-8") as open_txt_file:
target = unicodedata.normalize("NFKD", open_txt_file.read()).encode("ascii", "ignore")
target = text_to_char_array(target)
target_len = len(target)
session.run(self._enqueue_op, feed_dict={
self._x: source,
self._x_length: source_len,
self._y: target,
self._y_length: target_len})
def next_batch(self):
source, source_lengths, target, target_lengths = self._example_queue.dequeue_many(self._batch_size)
sparse_labels = ctc_label_dense_to_sparse(target, target_lengths, self._batch_size)
return source, source_lengths, sparse_labels
@property
def total_batches(self):
# Note: If len(_txt_files) % _batch_size != 0, this re-uses initial _txt_files
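        # Illustrative example (not in the original source): 10 txt files with a batch size of 3 give 4 total batches, and the 4th batch wraps around to re-use the first 2 files.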
        return int(ceil(float(len(self._txt_files)) / float(self._batch_size)))
def read_data_sets(data_dir, batch_size, numcep, numcontext, thread_count=8, limit_dev=0, limit_test=0, limit_train=0):
# Conditionally download data
TED_DATA = "TEDLIUM_release2.tar.gz"
TED_DATA_URL = "http://www.openslr.org/resources/19/TEDLIUM_release2.tar.gz"
local_file = base.maybe_download(TED_DATA, data_dir, TED_DATA_URL)
# Conditionally extract TED data
TED_DIR = "TEDLIUM_release2"
_maybe_extract(data_dir, TED_DIR, local_file)
# Conditionally convert TED sph data to wav
_maybe_convert_wav(data_dir, TED_DIR)
# Conditionally split TED wav data
_maybe_split_wav(data_dir, TED_DIR)
# Conditionally split TED stm data
_maybe_split_stm(data_dir, TED_DIR)
# Create dev DataSet
dev = _read_data_set(data_dir, TED_DIR, "dev", thread_count, batch_size, numcep, numcontext, limit=limit_dev)
# Create test DataSet
test = _read_data_set(data_dir, TED_DIR, "test", thread_count, batch_size, numcep, numcontext, limit=limit_test)
# Create train DataSet
train = _read_data_set(data_dir, TED_DIR, "train", thread_count, batch_size, numcep, numcontext, limit=limit_train)
# Return DataSets
return DataSets(train, dev, test)
def _maybe_extract(data_dir, extracted_data, archive):
# If data_dir/extracted_data does not exist, extract archive in data_dir
if not gfile.Exists(path.join(data_dir, extracted_data)):
tar = tarfile.open(archive)
tar.extractall(data_dir)
tar.close()
def _maybe_convert_wav(data_dir, extracted_data):
# Create extracted_data dir
extracted_dir = path.join(data_dir, extracted_data)
# Conditionally convert dev sph to wav
_maybe_convert_wav_dataset(extracted_dir, "dev")
# Conditionally convert train sph to wav
_maybe_convert_wav_dataset(extracted_dir, "train")
# Conditionally convert test sph to wav
_maybe_convert_wav_dataset(extracted_dir, "test")
def _maybe_convert_wav_dataset(extracted_dir, data_set):
# Create source dir
source_dir = path.join(extracted_dir, data_set, "sph")
# Create target dir
target_dir = path.join(extracted_dir, data_set, "wav")
# Conditionally convert sph files to wav files
if not gfile.Exists(target_dir):
# Create target_dir
makedirs(target_dir)
# Loop over sph files in source_dir and convert each to wav
for sph_file in glob(path.join(source_dir, "*.sph")):
transformer = Transformer()
wav_filename = path.splitext(path.basename(sph_file))[0] + ".wav"
wav_file = path.join(target_dir, wav_filename)
transformer.build(sph_file, wav_file)
remove(sph_file)
# Remove source_dir
rmdir(source_dir)
def _maybe_split_wav(data_dir, extracted_data):
# Create extracted_data dir
extracted_dir = path.join(data_dir, extracted_data)
# Conditionally split dev wav
_maybe_split_wav_dataset(extracted_dir, "dev")
# Conditionally split train wav
_maybe_split_wav_dataset(extracted_dir, "train")
# Conditionally split test wav
_maybe_split_wav_dataset(extracted_dir, "test")
def _maybe_split_wav_dataset(extracted_dir, data_set):
# Create stm dir
stm_dir = path.join(extracted_dir, data_set, "stm")
# Create wav dir
wav_dir = path.join(extracted_dir, data_set, "wav")
# Loop over stm files and split corresponding wav
for stm_file in glob(path.join(stm_dir, "*.stm")):
# Parse stm file
stm_segments = parse_stm_file(stm_file)
# Open wav corresponding to stm_file
wav_filename = path.splitext(path.basename(stm_file))[0] + ".wav"
wav_file = path.join(wav_dir, wav_filename)
origAudio = wave.open(wav_file,'r')
# Loop over stm_segments and split wav_file for each segment
for stm_segment in stm_segments:
# Create wav segment filename
start_time = stm_segment.start_time
stop_time = stm_segment.stop_time
new_wav_filename = path.splitext(path.basename(stm_file))[0] + "-" + str(start_time) + "-" + str(stop_time) + ".wav"
new_wav_file = path.join(wav_dir, new_wav_filename)
# If the wav segment filename does not exist create it
if not gfile.Exists(new_wav_file):
_split_wav(origAudio, start_time, stop_time, new_wav_file)
# Close origAudio
origAudio.close()
# Remove wav_file
remove(wav_file)
def _split_wav(origAudio, start_time, stop_time, new_wav_file):
frameRate = origAudio.getframerate()
origAudio.setpos(int(start_time*frameRate))
chunkData = origAudio.readframes(int((stop_time - start_time)*frameRate))
chunkAudio = wave.open(new_wav_file,'w')
chunkAudio.setnchannels(origAudio.getnchannels())
chunkAudio.setsampwidth(origAudio.getsampwidth())
chunkAudio.setframerate(frameRate)
chunkAudio.writeframes(chunkData)
chunkAudio.close()
def _maybe_split_stm(data_dir, extracted_data):
# Create extracted_data dir
extracted_dir = path.join(data_dir, extracted_data)
# Conditionally split dev stm
_maybe_split_stm_dataset(extracted_dir, "dev")
# Conditionally split train stm
_maybe_split_stm_dataset(extracted_dir, "train")
# Conditionally split test stm
_maybe_split_stm_dataset(extracted_dir, "test")
def _maybe_split_stm_dataset(extracted_dir, data_set):
# Create stm dir
stm_dir = path.join(extracted_dir, data_set, "stm")
# Obtain stm files
stm_files = glob(path.join(stm_dir, "*.stm"))
# Loop over stm files and split each one
for stm_file in stm_files:
# Parse stm file
stm_segments = parse_stm_file(stm_file)
# Loop over stm_segments and create txt file for each one
for stm_segment in stm_segments:
start_time = stm_segment.start_time
stop_time = stm_segment.stop_time
txt_filename = path.splitext(path.basename(stm_file))[0] + "-" + str(start_time) + "-" + str(stop_time) + ".txt"
txt_file = path.join(stm_dir, txt_filename)
# If the txt segment file does not exist create it
if not gfile.Exists(txt_file):
with open(txt_file, "w+") as f:
f.write(stm_segment.transcript)
# Remove stm_file
remove(stm_file)
def _read_data_set(data_dir, extracted_data, data_set, thread_count, batch_size, numcep, numcontext, limit=0):
# Create stm dir
stm_dir = path.join(data_dir, extracted_data, data_set, "stm")
# Obtain list of txt files
txt_files = glob(path.join(stm_dir, "*.txt"))
if limit > 0:
txt_files = txt_files[:limit]
# Return DataSet
return DataSet(txt_files, thread_count, batch_size, numcep, numcontext)
| andrenatal/DeepSpeech | util/importers/ted.py | Python | mpl-2.0 | 11,333 |
import sqlite3
import optparse
import os
import re
def printDownloads(downloadDB):
conn = sqlite3.connect(downloadDB)
c = conn.cursor()
c.execute('SELECT name, source, datetime(endTime/1000000, \'unixepoch\') FROM moz_downloads;')
print '\n[*] --- Files Downloaded --- '
for row in c:
print '[+] File: ' + str(row[0]) + ' from source: ' + str(row[1]) + ' at: ' + str(row[2])
def printCookies(cookiesDB):
try:
conn = sqlite3.connect(cookiesDB)
c = conn.cursor()
c.execute('SELECT host, name, value FROM moz_cookies')
print '\n[*] -- Found Cookies --'
for row in c:
host = str(row[0])
name = str(row[1])
value = str(row[2])
print '[+] Host: ' + host + ', Cookie: ' + name + ', Value: ' + value
except Exception, e:
if 'encrypted' in str(e):
print '\n[*] Error reading your cookies database.'
print '[*] Upgrade your Python-Sqlite3 Library'
def printHistory(placesDB):
try:
conn = sqlite3.connect(placesDB)
c = conn.cursor()
c.execute("select url, datetime(visit_date/1000000, \
'unixepoch') from moz_places, moz_historyvisits \
where visit_count > 0 and moz_places.id==\
            moz_historyvisits.place_id;")
        print '\n[*] -- Found History --'
for row in c:
url = str(row[0])
date = str(row[1])
print '[+] ' + date + ' - Visited: ' + url
except Exception, e:
if 'encrypted' in str(e):
print '\n[*] Error reading your places database.'
print '[*] Upgrade your Python-Sqlite3 Library'
exit(0)
def printGoogle(placesDB):
conn = sqlite3.connect(placesDB)
c = conn.cursor()
c.execute("select url, datetime(visit_date/1000000,\
'unixepoch') from moz_places, moz_historyvisits \
where visit_count > 0 and moz_places.id== \
moz_historyvisits.place_id;")
print '\n[*] -- Found Google --'
for row in c:
url = str(row[0])
date = str(row[1])
if 'google' in url.lower():
r = re.findall(r'q=.*\&', url)
if r:
search=r[0].split('&')[0]
search=search.replace('q=', '').replace('+', ' ')
print '[+] ' + date + ' - Searched For: ' + search
def main():
parser = optparse.OptionParser("usage%prog -p <firefox profile path> ")
parser.add_option('-p', dest='pathName', type='string', help='specify firefox profile path')
(options, args) = parser.parse_args()
pathName = options.pathName
if pathName == None:
print parser.usage
exit(0)
elif os.path.isdir(pathName) == False:
print '[!] Path Does Not Exist: ' + pathName
exit(0)
else:
downloadDB = os.path.join(pathName, 'downloads.sqlite')
if os.path.isfile(downloadDB):
printDownloads(downloadDB)
else:
print '[!] Firefox Downloads Database does not exist: ' + downloadDB
cookiesDB = os.path.join(pathName, 'cookies.sqlite')
if os.path.isfile(cookiesDB):
printCookies(cookiesDB)
else:
print '[!] Firefox Cookies Database does not exist: ' + cookiesDB
placesDB = os.path.join(pathName, 'places.sqlite')
if os.path.isfile(placesDB):
printHistory(placesDB)
printGoogle(placesDB)
else:
print '[!] Firefox Places Database does not exist: ' + placesDB
if __name__ == '__main__':
main() | tmosleyIII/firefox-data-retrieval | firefox/retriever.py | Python | mit | 3,731 |
"""
minecraft_server.py
Launches the Minecraft Server to generate chunks in a world.
Generates chunks directly into the world, or generates them into
a cached temporary world and copies only the requested chunks.
Also downloads the latest minecraft server and stores it in a
~/.mceditlib/ServerJarStorage directory in the user's home folder.
Can check for updates and download them, and can launch user-supplied
servers once placed in the storage directory.
Very embarrassing code here.
"""
from __future__ import absolute_import
import itertools
import logging
import os
from os.path import dirname, join, basename
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import urllib
from mceditlib import worldeditor
from mceditlib.exceptions import ChunkNotPresent
from mceditlib.directories import appSupportDir
from mceditlib.util import exhaust
log = logging.getLogger(__name__)
__author__ = 'Rio'
# Thank you, Stackoverflow
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(f):
return os.path.exists(f) and os.access(f, os.X_OK)
fpath, _fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
if sys.platform == "win32":
if "SYSTEMROOT" in os.environ:
root = os.environ["SYSTEMROOT"]
exe_file = os.path.join(root, program)
if is_exe(exe_file):
return exe_file
if "PATH" in os.environ:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
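# Illustrative example (not part of the original module): sort_nicely(["1.10", "1.9", "1.2"])
# reorders the list in place to ["1.2", "1.9", "1.10"], whereas a plain sort() would give
# ["1.10", "1.2", "1.9"].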
class ServerJarStorage(object):
defaultCacheDir = os.path.join(appSupportDir, u"ServerJarStorage")
def __init__(self, cacheDir=None):
if cacheDir is None:
cacheDir = self.defaultCacheDir
self.cacheDir = cacheDir
if not os.path.exists(self.cacheDir):
os.makedirs(self.cacheDir)
readme = os.path.join(self.cacheDir, "README.TXT")
if not os.path.exists(readme):
with file(readme, "w") as f:
f.write("""
About this folder:
This folder is used by MCEdit and mceditlib to store different versions of the
Minecraft Server to use for terrain generation. It should have one or more
subfolders, one for each version of the server. Each subfolder must hold at
least one file named minecraft_server.jar, and the subfolder's name should
have the server's version plus the names of any installed mods.
There may already be a subfolder here (for example, "Beta 1.7.3") if you have
used the Chunk Create feature in MCEdit to create chunks using the server.
Version numbers can be automatically detected. If you place one or more
minecraft_server.jar files in this folder, they will be placed automatically
into well-named subfolders the next time you run MCEdit. If a file's name
begins with "minecraft_server" and ends with ".jar", it will be detected in
this way.
""")
self.reloadVersions()
def reloadVersions(self):
cacheDirList = os.listdir(self.cacheDir)
self.versions = list(reversed(sorted([v for v in cacheDirList if os.path.exists(self.jarfileForVersion(v))], key=alphanum_key)))
if MCServerChunkGenerator.javaExe:
for f in cacheDirList:
p = os.path.join(self.cacheDir, f)
if f.startswith("minecraft_server") and f.endswith(".jar") and os.path.isfile(p):
print "Unclassified minecraft_server.jar found in cache dir. Discovering version number..."
self.cacheNewVersion(p)
os.remove(p)
print "Minecraft_Server.jar storage initialized."
print u"Each server is stored in a subdirectory of {0} named with the server's version number".format(self.cacheDir)
print "Cached servers: ", self.versions
def downloadCurrentServer(self):
print "Downloading the latest Minecraft Server..."
try:
(filename, headers) = urllib.urlretrieve("http://www.minecraft.net/download/minecraft_server.jar")
except Exception as e:
print "Error downloading server: {0!r}".format(e)
return
self.cacheNewVersion(filename, allowDuplicate=False)
def cacheNewVersion(self, filename, allowDuplicate=True):
""" Finds the version number from the server jar at filename and copies
it into the proper subfolder of the server jar cache folder"""
version = MCServerChunkGenerator._serverVersionFromJarFile(filename)
print "Found version ", version
versionDir = os.path.join(self.cacheDir, version)
i = 1
newVersionDir = versionDir
while os.path.exists(newVersionDir):
if not allowDuplicate:
return
newVersionDir = versionDir + " (" + str(i) + ")"
i += 1
os.mkdir(newVersionDir)
shutil.copy2(filename, os.path.join(newVersionDir, "minecraft_server.jar"))
if version not in self.versions:
self.versions.append(version)
def jarfileForVersion(self, v):
return os.path.join(self.cacheDir, v, "minecraft_server.jar")
def checksumForVersion(self, v):
jf = self.jarfileForVersion(v)
with file(jf, "rb") as f:
import hashlib
return hashlib.md5(f.read()).hexdigest()
broken_versions = ["Beta 1.9 Prerelease {0}".format(i) for i in (1, 2, 3)]
@property
def latestVersion(self):
if len(self.versions) == 0:
return None
return max((v for v in self.versions if v not in self.broken_versions), key=alphanum_key)
def getJarfile(self, version=None):
if len(self.versions) == 0:
print "No servers found in cache."
self.downloadCurrentServer()
version = version or self.latestVersion
if version not in self.versions:
return None
return self.jarfileForVersion(version)
class JavaNotFound(RuntimeError):
pass
class VersionNotFound(RuntimeError):
pass
def readProperties(filename):
if not os.path.exists(filename):
return {}
with file(filename) as f:
        properties = dict((line.split("=", 1) for line in (l.strip() for l in f) if not line.startswith("#")))
return properties
def saveProperties(filename, properties):
with file(filename, "w") as f:
for k, v in properties.iteritems():
f.write("{0}={1}\n".format(k, v))
def findJava():
if sys.platform == "win32":
javaExe = which("java.exe")
if javaExe is None:
KEY_NAME = "HKLM\SOFTWARE\JavaSoft\Java Runtime Environment"
try:
p = subprocess.Popen(["REG", "QUERY", KEY_NAME, "/v", "CurrentVersion"], stdout=subprocess.PIPE, universal_newlines=True)
o, e = p.communicate()
lines = o.split("\n")
for l in lines:
l = l.strip()
if l.startswith("CurrentVersion"):
words = l.split(None, 2)
version = words[-1]
p = subprocess.Popen(["REG", "QUERY", KEY_NAME + "\\" + version, "/v", "JavaHome"], stdout=subprocess.PIPE, universal_newlines=True)
o, e = p.communicate()
lines = o.split("\n")
for l in lines:
l = l.strip()
if l.startswith("JavaHome"):
w = l.split(None, 2)
javaHome = w[-1]
javaExe = os.path.join(javaHome, "bin", "java.exe")
print "RegQuery: java.exe found at ", javaExe
break
except Exception as e:
print "Error while locating java.exe using the Registry: ", repr(e)
else:
javaExe = which("java")
return javaExe
class MCServerChunkGenerator(object):
"""Generates chunks using minecraft_server.jar. Uses a ServerJarStorage to
store different versions of minecraft_server.jar in an application support
folder.
from mceditlib import *
Example usage:
gen = MCServerChunkGenerator() # with no arguments, use the newest
# server version in the cache, or download
# the newest one automatically
level = loadWorldNamed("MyWorld")
gen.generateChunkInLevel(level, 12, 24)
Using an older version:
gen = MCServerChunkGenerator("Beta 1.6.5")
"""
defaultJarStorage = None
javaExe = findJava()
jarStorage = None
tempWorldCache = {}
def __init__(self, version=None, jarfile=None, jarStorage=None):
self.jarStorage = jarStorage or self.getDefaultJarStorage()
if self.javaExe is None:
raise JavaNotFound("Could not find java. Please check that java is installed correctly. (Could not find java in your PATH environment variable.)")
if jarfile is None:
jarfile = self.jarStorage.getJarfile(version)
if jarfile is None:
raise VersionNotFound("Could not find minecraft_server.jar for version {0}. Please make sure that a minecraft_server.jar is placed under {1} in a subfolder named after the server's version number.".format(version or "(latest)", self.jarStorage.cacheDir))
self.serverJarFile = jarfile
self.serverVersion = version or self._serverVersion()
@classmethod
def getDefaultJarStorage(cls):
if cls.defaultJarStorage is None:
cls.defaultJarStorage = ServerJarStorage()
return cls.defaultJarStorage
@classmethod
def clearWorldCache(cls):
cls.tempWorldCache = {}
for tempDir in os.listdir(cls.worldCacheDir):
t = os.path.join(cls.worldCacheDir, tempDir)
if os.path.isdir(t):
shutil.rmtree(t)
def createReadme(self):
readme = os.path.join(self.worldCacheDir, "README.TXT")
if not os.path.exists(readme):
with file(readme, "w") as f:
f.write("""
About this folder:
This folder is used by MCEdit and mceditlib to cache levels during terrain
generation. Feel free to delete it for any reason.
""")
worldCacheDir = os.path.join(tempfile.gettempdir(), "mceditlib_MCServerChunkGenerator")
def tempWorldForLevel(self, level):
# tempDir = tempfile.mkdtemp("mclevel_servergen")
tempDir = os.path.join(self.worldCacheDir, self.jarStorage.checksumForVersion(self.serverVersion), str(level.RandomSeed))
propsFile = os.path.join(tempDir, "server.properties")
properties = readProperties(propsFile)
tempWorld = self.tempWorldCache.get((self.serverVersion, level.RandomSeed))
if tempWorld is None:
if not os.path.exists(tempDir):
os.makedirs(tempDir)
self.createReadme()
worldName = "world"
worldName = properties.setdefault("level-name", worldName)
tempWorldDir = os.path.join(tempDir, worldName)
tempWorld = worldeditor.WorldEditor(tempWorldDir, create=not os.path.exists(tempWorldDir), random_seed=level.RandomSeed)
tempWorld.close()
tempWorldRO = worldeditor.WorldEditor(tempWorldDir, readonly=True)
self.tempWorldCache[self.serverVersion, level.RandomSeed] = tempWorldRO
if level.dimNo == 0:
properties["allow-nether"] = "false"
else:
tempWorld = tempWorld.getDimension(level.dimNo)
properties["allow-nether"] = "true"
properties["server-port"] = int(32767 + random.random() * 32700)
saveProperties(propsFile, properties)
return tempWorld, tempDir
def generateAtPosition(self, tempWorld, tempDir, cx, cz):
return exhaust(self.generateAtPositionIter(tempWorld, tempDir, cx, cz))
def generateAtPositionIter(self, tempWorld, tempDir, cx, cz, simulate=False):
tempWorldRW = worldeditor.WorldEditor(tempWorld.filename)
tempWorldRW.getWorldMetadata().Spawn = cx * 16, 64, cz * 16
tempWorldRW.saveChanges()
tempWorldRW.close()
del tempWorldRW
tempWorld.unload()
startTime = time.time()
proc = self.runServer(tempDir)
while proc.poll() is None:
line = proc.stderr.readline().strip()
log.info(line)
yield line
# Forge and FML change stderr output, causing MCServerChunkGenerator to wait endlessly.
#
# Vanilla:
# 2012-11-13 11:29:19 [INFO] Done (9.962s)!
#
# Forge/FML:
# 2012-11-13 11:47:13 [INFO] [Minecraft] Done (8.020s)!
if "[INFO]" in line and "Done" in line:
if simulate:
duration = time.time() - startTime
simSeconds = max(8, int(duration) + 1)
for i in range(simSeconds):
# process tile ticks
yield "%2d/%2d: Simulating the world for a little bit..." % (i, simSeconds)
time.sleep(1)
proc.stdin.write("stop\n")
proc.wait()
break
if "FAILED TO BIND" in line:
proc.kill()
proc.wait()
raise RuntimeError("Server failed to bind to port!")
stdout, _ = proc.communicate()
if "Could not reserve enough space" in stdout and not MCServerChunkGenerator.lowMemory:
MCServerChunkGenerator.lowMemory = True
for i in self.generateAtPositionIter(tempWorld, tempDir, cx, cz):
yield i
(tempWorld.parentWorld or tempWorld).loadLevelDat() # reload version number
def copyChunkAtPosition(self, tempWorld, level, cx, cz):
if level.containsChunk(cx, cz):
return
try:
tempChunkBytes = tempWorld._getChunkBytes(cx, cz)
except ChunkNotPresent as e:
raise ChunkNotPresent, "While generating a world in {0} using server {1} ({2!r})".format(tempWorld, self.serverJarFile, e), sys.exc_info()[2]
level.worldFolder.saveChunk(cx, cz, tempChunkBytes)
level._allChunks = None
def generateChunkInLevel(self, level, cx, cz):
assert isinstance(level, worldeditor.WorldEditor)
tempWorld, tempDir = self.tempWorldForLevel(level)
self.generateAtPosition(tempWorld, tempDir, cx, cz)
self.copyChunkAtPosition(tempWorld, level, cx, cz)
minRadius = 5
maxRadius = 20
def createLevel(self, level, box, simulate=False, **kw):
return exhaust(self.createLevelIter(level, box, simulate, **kw))
def createLevelIter(self, level, box, simulate=False, **kw):
if isinstance(level, basestring):
filename = level
level = worldeditor.WorldEditor(filename, create=True, **kw)
assert isinstance(level, worldeditor.WorldEditor)
minRadius = self.minRadius
genPositions = list(itertools.product(
xrange(box.mincx, box.maxcx, minRadius * 2),
xrange(box.mincz, box.maxcz, minRadius * 2)))
for i, (cx, cz) in enumerate(genPositions):
log.info("Generating at %s", ((cx, cz),))
parentDir = dirname(os.path.abspath(level.worldFolder.filename))
propsFile = join(parentDir, "server.properties")
props = readProperties(join(dirname(self.serverJarFile), "server.properties"))
props["level-name"] = basename(level.worldFolder.filename)
props["server-port"] = int(32767 + random.random() * 32700)
saveProperties(propsFile, props)
for p in self.generateAtPositionIter(level, parentDir, cx, cz, simulate):
yield i, len(genPositions), p
level.close()
def generateChunksInLevel(self, level, chunks):
return exhaust(self.generateChunksInLevelIter(level, chunks))
def generateChunksInLevelIter(self, level, chunks, simulate=False):
tempWorld, tempDir = self.tempWorldForLevel(level)
startLength = len(chunks)
minRadius = self.minRadius
maxRadius = self.maxRadius
chunks = set(chunks)
while len(chunks):
length = len(chunks)
centercx, centercz = chunks.pop()
chunks.add((centercx, centercz))
# assume the generator always generates at least an 11x11 chunk square.
centercx += minRadius
centercz += minRadius
# boxedChunks = [cPos for cPos in chunks if inBox(cPos)]
print "Generating {0} chunks out of {1} starting from {2}".format("XXX", len(chunks), (centercx, centercz))
yield startLength - len(chunks), startLength
# chunks = [c for c in chunks if not inBox(c)]
for p in self.generateAtPositionIter(tempWorld, tempDir, centercx, centercz, simulate):
yield startLength - len(chunks), startLength, p
i = 0
for cx, cz in itertools.product(
xrange(centercx - maxRadius, centercx + maxRadius),
xrange(centercz - maxRadius, centercz + maxRadius)):
if level.containsChunk(cx, cz):
chunks.discard((cx, cz))
elif ((cx, cz) in chunks
and all(tempWorld.containsChunk(ncx, ncz) for ncx, ncz in itertools.product(xrange(cx-1, cx+2), xrange(cz-1, cz+2)))
):
self.copyChunkAtPosition(tempWorld, level, cx, cz)
i += 1
chunks.discard((cx, cz))
yield startLength - len(chunks), startLength
if length == len(chunks):
print "No chunks were generated. Aborting."
break
level.saveChanges()
def runServer(self, startingDir):
return self._runServer(startingDir, self.serverJarFile)
lowMemory = False
@classmethod
def _runServer(cls, startingDir, jarfile):
log.info("Starting server %s in %s", jarfile, startingDir)
if cls.lowMemory:
memflags = []
else:
memflags = ["-Xmx1024M", "-Xms1024M", ]
proc = subprocess.Popen([cls.javaExe, "-Djava.awt.headless=true"] + memflags + ["-jar", jarfile],
executable=cls.javaExe,
cwd=startingDir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
return proc
def _serverVersion(self):
return self._serverVersionFromJarFile(self.serverJarFile)
@classmethod
def _serverVersionFromJarFile(cls, jarfile):
tempdir = tempfile.mkdtemp("mclevel_servergen")
proc = cls._runServer(tempdir, jarfile)
version = "Unknown"
# out, err = proc.communicate()
# for line in err.split("\n"):
while proc.poll() is None:
line = proc.stderr.readline()
if "Preparing start region" in line:
break
if "Starting minecraft server version" in line:
version = line.split("Starting minecraft server version")[1].strip()
break
if proc.returncode is None:
try:
proc.kill()
except WindowsError:
pass # access denied, process already terminated
proc.wait()
shutil.rmtree(tempdir)
if ";)" in version:
version = version.replace(";)", "") # Damnit, Jeb!
# Versions like "0.2.1" are alphas, and versions like "1.0.0" without "Beta" are releases
if version[0] == "0":
version = "Alpha " + version
try:
if int(version[0]) > 0:
version = "Release " + version
except ValueError:
pass
return version
| Rubisk/mcedit2 | src/mceditlib/minecraft_server.py | Python | bsd-3-clause | 20,638 |
def primes(kmax):
result = []
if kmax > 1000:
kmax = 1000
p = [0] * 1000
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
i += 1
if i == k:
p[k] = n
k += 1
result.append(n)
n += 1
return result
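# Illustrative usage (not part of the original example): primes(5) returns the first five primes, [2, 3, 5, 7, 11].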
| marscher/cython | docs/examples/tutorial/primes/primes.py | Python | apache-2.0 | 328 |
# Copyright (C) 2014 Johnny Vestergaard <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import json
import os
import inspect
# this is needed because we use it in the xml.
import random
import gevent
import gevent.event
from lxml import etree
logger = logging.getLogger(__name__)
class Databus(object):
def __init__(self):
self._data = {}
self._observer_map = {}
self.initialized = gevent.event.Event()
# the idea here is that we can store both values and functions in the key value store
# functions could be used if a profile wants to simulate a sensor, or the function
# could interface with a real sensor
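    # Illustrative sketch (not part of the original module) of the two styles:
    #     databus.set_value('facility_name', 'ACME Plant')    # plain value, returned as-is
    #     databus.set_value('temperature', read_sensor)       # zero-argument callable, re-evaluated on every get_value()
    # 'facility_name', 'ACME Plant' and 'read_sensor' are hypothetical names used only for illustration.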
def get_value(self, key):
logger.debug('DataBus: Get value from key: [{0}]'.format(key))
assert key in self._data
item = self._data[key]
if getattr(item, "get_value", None):
# this could potentially generate a context switch, but as long the called method
# does not "callback" the databus we should be fine
return item.get_value()
elif hasattr(item, '__call__'):
return item()
else:
# guaranteed to not generate context switch
return item
def set_value(self, key, value):
logger.debug('DataBus: Storing key: [{0}] value: [{1}]'.format(key, value))
self._data[key] = value
# notify observers
if key in self._observer_map:
gevent.spawn(self.notify_observers, key)
def notify_observers(self, key):
for cb in self._observer_map[key]:
cb(key)
def observe_value(self, key, callback):
assert hasattr(callback, '__call__')
assert len(inspect.getargspec(callback)[0])
if key not in self._observer_map:
self._observer_map[key] = []
self._observer_map[key].append(callback)
def initialize(self, config_file):
self._reset()
logger.debug('Initializing databus using {0}.'.format(config_file))
dom = etree.parse(config_file)
entries = dom.xpath('//core/databus/key_value_mappings/*')
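        # Expected XML shape, implied by the xpaths used here (element and key names are illustrative only):
        #   <core><databus><key_value_mappings>
        #     <key_value_mapping name="facility_name"><value type="value">'ACME Plant'</value></key_value_mapping>
        #     <key_value_mapping name="temperature"><value type="function" param="[2]">some.module.SensorClass</value></key_value_mapping>
        #   </key_value_mappings></databus></core>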
for entry in entries:
key = entry.attrib['name']
value = entry.xpath('./value/text()')[0].strip()
value_type = str(entry.xpath('./value/@type')[0])
assert key not in self._data
logging.debug('Initializing {0} with {1} as a {2}.'.format(key, value, value_type))
if value_type == 'value':
self.set_value(key, eval(value))
elif value_type == 'function':
namespace, _classname = value.rsplit('.', 1)
params = entry.xpath('./value/@param')
module = __import__(namespace, fromlist=[_classname])
_class = getattr(module, _classname)
if len(params) > 0:
# eval param to list
params = eval(params[0])
self.set_value(key, _class(*(tuple(params))))
else:
self.set_value(key, _class())
else:
raise Exception('Unknown value type: {0}'.format(value_type))
self.initialized.set()
    def get_shapshot(self):
        # takes a snapshot of the internal honeypot state and returns it as json.
        snapshot = {}
        for key in self._data.keys():
            snapshot[key] = self.get_value(key)
        return json.dumps(snapshot)
def _reset(self):
logger.debug('Resetting databus.')
self._data.clear()
self._observer_map.clear()
| soltraconpotprojectNLDA/SoltraConpot | conpot/core/databus.py | Python | gpl-2.0 | 4,277 |
from rider.core import HTTP_200
class ResponseSetter(object):
status = HTTP_200
content_type = 'text/plain'
response_type = 'body'
location = None
content_wrapper = None
def __init__(self, *args, **kwargs):
self.content = None
super(ResponseSetter, self).__init__(*args, **kwargs)
def set_response(self, response):
response.status = self.status
response.content_type = self.content_type
self.content_type = self.__class__.content_type
if self.location is not None:
response.location = self.location
self.location = self.__class__.location
if self.content is not None:
setattr(
response, self.response_type,
self.content if self.content_wrapper is None else self.content_wrapper.__func__(self.content)
)
self.content = None
| riderframework/rider | rider/views/response.py | Python | apache-2.0 | 906 |
# Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
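# Illustrative example (not part of the original solution): numbers = [2, 7, 11, 15], target = 9
# returns [1, 2], because numbers[0] + numbers[1] == 9 and the answer is 1-indexed.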
class Solution(object):
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
i=0
j=len(numbers)-1
while i<j:
if numbers[i]+numbers[j]<target:
i+=1
elif numbers[i]+numbers[j]>target:
j-=1
else:
return [i+1,j+1] | 95subodh/Leetcode | 167. Two Sum II - Input array is sorted.py | Python | mit | 437 |
import datetime
import json
import struct
import zlib
import pytest
from pygelf import gelf
class _ObjWithStr:
def __str__(self):
return 'str'
class _ObjWithRepr:
def __repr__(self):
return 'repr'
_now = datetime.datetime.now()
@pytest.mark.parametrize(
('obj', 'expected'),
[
(_ObjWithStr(), 'str'),
(_ObjWithRepr(), 'repr'),
(_now, _now.isoformat()),
(_now.time(), _now.time().isoformat()),
(_now.date(), _now.date().isoformat()),
]
)
def test_object_to_json(obj, expected):
result = gelf.object_to_json(obj)
assert result == expected
@pytest.mark.parametrize('compress', [True, False])
def test_pack(compress):
message = {'version': '1.1', 'short_message': 'test pack', 'foo': _ObjWithStr()}
expected = json.loads(json.dumps(message, default=str))
packed_message = gelf.pack(message, compress, default=str)
unpacked_message = zlib.decompress(packed_message) if compress else packed_message
unpacked_message = json.loads(unpacked_message.decode('utf-8'))
assert expected == unpacked_message
def test_split():
message = b'12345'
header = b'\x1e\x0f'
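    # GELF chunk layout: 2 magic bytes (0x1e 0x0f), an 8-byte message id, a 1-byte sequence number, a 1-byte sequence count, then the payload slice; the offsets asserted below follow this layout.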
chunks = list(gelf.split(message, 2))
expected = [
(struct.pack('b', 0), struct.pack('b', 3), b'12'),
(struct.pack('b', 1), struct.pack('b', 3), b'34'),
(struct.pack('b', 2), struct.pack('b', 3), b'5')
]
assert len(chunks) == len(expected)
for index, chunk in enumerate(chunks):
expected_index, expected_chunks_count, expected_chunk = expected[index]
assert chunk[:2] == header
assert chunk[10:11] == expected_index
assert chunk[11:12] == expected_chunks_count
assert chunk[12:] == expected_chunk
| keeprocking/pygelf | tests/test_core_functions.py | Python | mit | 1,764 |
# -*- coding: utf-8 -*-
import Image
import os
import sys
import ImageEnhance
import argparse
import common
# utils.py
import utils
from utils import encodeChinese
# config.py
import config
parser = argparse.ArgumentParser(description = encodeChinese('进行亮度整理'))
parser.add_argument('--src', action='store', dest='src_image_dir',
help = encodeChinese('待处理贴图路径,比如D:\\tmp,也可以是一个包含贴图路径的txt文件,比如D:\\tmp\\texlist.txt,所有贴图必须是绝对路径'))
parser.add_argument('--dst', action='store', dest='dst_image_dir',
help = encodeChinese('处理后贴图路径'))
parser.add_argument('--brightness', action='store',dest='bright',
help = encodeChinese('设定亮度值'))
parser.add_argument('--contrast',action='store',dest='contrast',
help = encodeChinese('设定对比度'))
parser.add_argument('--donotwait', action='store_true', dest='notwait',
help= encodeChinese('程序结束之后是否等待用户按任意键,如果需要将本程序继承到其他工具集中,可以考虑打开donotwait开关'))
parser.add_argument('--debug', action='store_true', dest='isdebug',
help= encodeChinese('调试程序的开关'))
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
# Validate the required arguments
if args.src_image_dir is None :
print encodeChinese('Error : 没有输入待处理贴图路径!\n<按任意键退出>')
os.system('pause>nul')
sys.exit()
if args.dst_image_dir is None:
print encodeChinese('Error: 没有输入处理后贴图路径! \n<按任意键退出>')
os.system('pause>nul')
sys.exit()
if args.bright is None:
print encodeChinese('Error: 没有输入亮度值! \n<按任意键退出>')
os.system('pause>nul')
sys.exit()
if args.contrast is None:
print encodeChinese('Error:没有输入对比度! \n<按任意键退出>')
os.system('pause>nul')
sys.exit()
if args.isdebug is True :
config.DEBUG = True
else :
config.DEBUG = False
# Check whether the received path is a directory
def judgeDir(path):
return os.path.isdir(path)
rootDir = args.src_image_dir
# targetDir = rootDir.split('src\\')[0]
targetDir = args.dst_image_dir
brightnessValue = args.bright
def changeBrightness(img,brightnessValue):
brightnessValue = 1 + 0.01 * float(brightnessValue)
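    # e.g. a command-line value of 20 becomes an enhancement factor of 1.2 (20% brighter); 0 leaves the image unchanged and negative values darken it.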
if img.mode == 'RGB':
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightnessValue)
return img
elif img.mode == 'RGBA':
data = img.getdata()
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(brightnessValue)
newdata = img.getdata()
truedata = list()
i = 0
for item in newdata:
truedata.append((item[0],item[1],item[2],data[i][3]))
i += 1
img.putdata(truedata)
return img
# Adjust image contrast
def changeContrast(img,contrastValue):
    # Get a contrast enhancer for the image
myContrast = ImageEnhance.Contrast(img)
contrast_img = myContrast.enhance(float(contrastValue))
return contrast_img
def loop(rootdir, brightnessValue):
# src is a dir
if judgeDir(rootdir):
for parent,dirnames,filenames in os.walk(rootdir):
for filename in filenames:
fName = filename
filename = parent + os.sep + fName
try:
img = Image.open(filename)
except:
print str(filename) + '\t' + encodeChinese('不是贴图')
continue
if img.mode.lower() == 'rgb' and fName.find('sky') == -1:
newimg = changeBrightness(img,brightnessValue)
try:
newimg.save(targetDir + os.sep + fName, quality = 100)
#print targetDir + fName
except:
os.makedirs(targetDir)
#print 'os.mkdir'
newimg.save(targetDir + os.sep + fName, quality = 100)
# src is a txt file, containing all textures path.
else:
file_path = open(rootdir,'r')
file_info = file_path.read()
#print file_info
for fileline in file_info.split('\n'):
if len(fileline) != 0:
                # Extract the filename, brightness value and contrast value in turn
filename,brightnessValue,contrastValue = fileline.split('$')
#print filename,brightnessValue
utils.spdebug(filename)
utils.spdebug("brightness : " + brightnessValue)
utils.spdebug("contrast : " + contrastValue)
if os.path.exists(filename) is False :
print str(filename) + '\t' + encodeChinese('文件不存在')
continue
try:
img = Image.open(filename)
except:
print str(filename) + '\t' + encodeChinese('ImageEnhance库打开图片文件时候抛出异常!')
continue
if img.mode.lower() == 'rgb' or img.mode.lower() == 'rgba':
newimg = changeBrightness(img,brightnessValue)
newimg = changeContrast(newimg,contrastValue)
try:
newimg.save(filename, quality = 100)
utils.spdebug(filename + ' is ok!')
except:
os.makedirs(targetDir)
#print 'os.mkdir'
newimg.save(filename, quality = 100)
loop(rootDir, brightnessValue)
if args.notwait is False :
print common.encodeChinese('亮度设置成功!点任意键继续!')
common.pause() | xxd3vin/spp-sdk | tools/script/python/setBrightness.py | Python | mit | 5,105 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Comments and reviews for records """
__revision__ = "$Id$"
# non Invenio imports:
import time
import math
import os
import cgi
import re
from datetime import datetime, timedelta
# Invenio imports:
from invenio.dbquery import run_sql
from invenio.config import CFG_PREFIX, \
CFG_SITE_LANG, \
CFG_WEBALERT_ALERT_ENGINE_EMAIL,\
CFG_SITE_SUPPORT_EMAIL,\
CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL,\
CFG_SITE_URL,\
CFG_SITE_NAME,\
CFG_WEBCOMMENT_ALLOW_REVIEWS,\
CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS,\
CFG_WEBCOMMENT_ALLOW_COMMENTS,\
CFG_WEBCOMMENT_ADMIN_NOTIFICATION_LEVEL,\
CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN,\
CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS,\
CFG_WEBCOMMENT_DEFAULT_MODERATOR, \
CFG_SITE_RECORD, \
CFG_WEBCOMMENT_EMAIL_REPLIES_TO, \
CFG_WEBCOMMENT_ROUND_DATAFIELD, \
CFG_WEBCOMMENT_RESTRICTION_DATAFIELD, \
CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH
from invenio.webmessage_mailutils import \
email_quote_txt, \
email_quoted_txt2html
from invenio.htmlutils import tidy_html
from invenio.webuser import get_user_info, get_email, collect_user_info
from invenio.dateutils import convert_datetext_to_dategui, \
datetext_default, \
convert_datestruct_to_datetext
from invenio.mailutils import send_email
from invenio.errorlib import register_exception
from invenio.messages import wash_language, gettext_set_language
from invenio.urlutils import wash_url_argument
from invenio.webcomment_config import CFG_WEBCOMMENT_ACTION_CODE, \
InvenioWebCommentError, \
InvenioWebCommentWarning
from invenio.access_control_engine import acc_authorize_action
from invenio.search_engine import \
guess_primary_collection_of_a_record, \
check_user_can_view_record, \
get_collection_reclist, \
get_colID
from invenio.search_engine_utils import get_fieldvalues
from invenio.webcomment_washer import EmailWasher
try:
import invenio.template
webcomment_templates = invenio.template.load('webcomment')
except:
pass
def perform_request_display_comments_or_remarks(req, recID, display_order='od', display_since='all', nb_per_page=100, page=1, ln=CFG_SITE_LANG, voted=-1, reported=-1, subscribed=0, reviews=0, uid=-1, can_send_comments=False, can_attach_files=False, user_is_subscribed_to_discussion=False, user_can_unsubscribe_from_discussion=False, display_comment_rounds=None):
"""
Returns all the comments (reviews) of a specific internal record or external basket record.
@param recID: record id where (internal record IDs > 0) or (external basket record IDs < -100)
@param display_order: hh = highest helpful score, review only
lh = lowest helpful score, review only
hs = highest star score, review only
ls = lowest star score, review only
od = oldest date
nd = newest date
@param display_since: all= no filtering by date
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
where n is a single digit integer between 0 and 9
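                          (so, following this scheme, "3w" would limit the display to comments posted within the last three weeks)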
@param nb_per_page: number of results per page
@param page: results page
@param voted: boolean, active if user voted for a review, see perform_request_vote function
@param reported: boolean, active if user reported a certain comment/review, perform_request_report function
@param subscribed: int, 1 if user just subscribed to discussion, -1 if unsubscribed
@param reviews: boolean, enabled if reviews, disabled for comments
@param uid: the id of the user who is reading comments
@param can_send_comments: if user can send comment or not
@param can_attach_files: if user can attach file to comment or not
@param user_is_subscribed_to_discussion: True if user already receives new comments by email
@param user_can_unsubscribe_from_discussion: True is user is allowed to unsubscribe from discussion
@return html body.
"""
_ = gettext_set_language(ln)
warnings = []
nb_reviews = 0
nb_comments = 0
# wash arguments
recID = wash_url_argument(recID, 'int')
ln = wash_language(ln)
display_order = wash_url_argument(display_order, 'str')
display_since = wash_url_argument(display_since, 'str')
nb_per_page = wash_url_argument(nb_per_page, 'int')
page = wash_url_argument(page, 'int')
voted = wash_url_argument(voted, 'int')
reported = wash_url_argument(reported, 'int')
reviews = wash_url_argument(reviews, 'int')
# vital argument check
(valid, error_body) = check_recID_is_in_range(recID, warnings, ln)
if not(valid):
return error_body
# CERN hack begins: filter out ATLAS comments
from invenio.config import CFG_CERN_SITE
if CFG_CERN_SITE:
restricted_comments_p = False
for report_number in get_fieldvalues(recID, '088__a'):
if report_number.startswith("ATL-"):
restricted_comments_p = True
break
if restricted_comments_p:
err_code, err_msg = acc_authorize_action(uid, 'viewrestrcoll',
collection='ATLAS Communications')
if err_code:
return err_msg
# CERN hack ends
# Query the database and filter results
user_info = collect_user_info(uid)
res = query_retrieve_comments_or_remarks(recID, display_order, display_since, reviews, user_info=user_info)
# res2 = query_retrieve_comments_or_remarks(recID, display_order, display_since, not reviews, user_info=user_info)
nb_res = len(res)
from invenio.webcommentadminlib import get_nb_reviews, get_nb_comments
nb_reviews = get_nb_reviews(recID, count_deleted=False)
nb_comments = get_nb_comments(recID, count_deleted=False)
# checking non vital arguemnts - will be set to default if wrong
#if page <= 0 or page.lower() != 'all':
if page < 0:
page = 1
try:
raise InvenioWebCommentWarning(_('Bad page number --> showing first page.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_INVALID_PAGE_NB',))
if nb_per_page < 0:
nb_per_page = 100
try:
            raise InvenioWebCommentWarning(_('Bad number of results per page --> showing 100 results per page.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_INVALID_NB_RESULTS_PER_PAGE',))
if CFG_WEBCOMMENT_ALLOW_REVIEWS and reviews:
if display_order not in ['od', 'nd', 'hh', 'lh', 'hs', 'ls']:
display_order = 'hh'
try:
raise InvenioWebCommentWarning(_('Bad display order --> showing most helpful first.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_INVALID_REVIEW_DISPLAY_ORDER',))
else:
if display_order not in ['od', 'nd']:
display_order = 'od'
try:
raise InvenioWebCommentWarning(_('Bad display order --> showing oldest first.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_INVALID_DISPLAY_ORDER',))
if not display_comment_rounds:
display_comment_rounds = []
# filter results according to page and number of reults per page
if nb_per_page > 0:
if nb_res > 0:
last_page = int(math.ceil(nb_res / float(nb_per_page)))
else:
last_page = 1
if page > last_page:
page = 1
try:
raise InvenioWebCommentWarning(_('Bad page number --> showing first page.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(("WRN_WEBCOMMENT_INVALID_PAGE_NB",))
if nb_res > nb_per_page: # if more than one page of results
if page < last_page:
res = res[(page-1)*(nb_per_page) : (page*nb_per_page)]
else:
res = res[(page-1)*(nb_per_page) : ]
else: # one page of results
pass
else:
last_page = 1
# Send to template
avg_score = 0.0
if not CFG_WEBCOMMENT_ALLOW_COMMENTS and not CFG_WEBCOMMENT_ALLOW_REVIEWS: # comments not allowed by admin
try:
raise InvenioWebCommentError(_('Comments on records have been disallowed by the administrator.'))
except InvenioWebCommentError, exc:
register_exception(req=req)
body = webcomment_templates.tmpl_error(exc.message, ln)
return body
# errors.append(('ERR_WEBCOMMENT_COMMENTS_NOT_ALLOWED',))
if reported > 0:
try:
raise InvenioWebCommentWarning(_('Your feedback has been recorded, many thanks.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, 'green'))
#warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED',))
elif reported == 0:
try:
raise InvenioWebCommentWarning(_('You have already reported an abuse for this comment.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_ALREADY_REPORTED',))
elif reported == -2:
try:
raise InvenioWebCommentWarning(_('The comment you have reported no longer exists.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_INVALID_REPORT',))
if CFG_WEBCOMMENT_ALLOW_REVIEWS and reviews:
avg_score = calculate_avg_score(res)
if voted > 0:
try:
raise InvenioWebCommentWarning(_('Your feedback has been recorded, many thanks.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, 'green'))
#warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED',))
elif voted == 0:
try:
raise InvenioWebCommentWarning(_('Sorry, you have already voted. This vote has not been recorded.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_ALREADY_VOTED',))
if subscribed == 1:
try:
raise InvenioWebCommentWarning(_('You have been subscribed to this discussion. From now on, you will receive an email whenever a new comment is posted.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, 'green'))
#warnings.append(('WRN_WEBCOMMENT_SUBSCRIBED',))
elif subscribed == -1:
try:
raise InvenioWebCommentWarning(_('You have been unsubscribed from this discussion.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, 'green'))
#warnings.append(('WRN_WEBCOMMENT_UNSUBSCRIBED',))
grouped_comments = group_comments_by_round(res, reviews)
# Clean list of comments round names
if not display_comment_rounds:
display_comment_rounds = []
elif 'all' in display_comment_rounds:
display_comment_rounds = [cmtgrp[0] for cmtgrp in grouped_comments]
elif 'latest' in display_comment_rounds:
if grouped_comments:
display_comment_rounds.append(grouped_comments[-1][0])
display_comment_rounds.remove('latest')
body = webcomment_templates.tmpl_get_comments(req,
recID,
ln,
nb_per_page, page, last_page,
display_order, display_since,
CFG_WEBCOMMENT_ALLOW_REVIEWS,
grouped_comments, nb_comments, avg_score,
warnings,
border=0,
reviews=reviews,
total_nb_reviews=nb_reviews,
uid=uid,
can_send_comments=can_send_comments,
can_attach_files=can_attach_files,
user_is_subscribed_to_discussion=\
user_is_subscribed_to_discussion,
user_can_unsubscribe_from_discussion=\
user_can_unsubscribe_from_discussion,
display_comment_rounds=display_comment_rounds)
return body
def perform_request_vote(cmt_id, client_ip_address, value, uid=-1):
"""
Vote positively or negatively for a comment/review
@param cmt_id: review id
@param value: +1 for voting positively
-1 for voting negatively
@return: integer 1 if successful, integer 0 if not
"""
cmt_id = wash_url_argument(cmt_id, 'int')
client_ip_address = wash_url_argument(client_ip_address, 'str')
value = wash_url_argument(value, 'int')
uid = wash_url_argument(uid, 'int')
if cmt_id > 0 and value in [-1, 1] and check_user_can_vote(cmt_id, client_ip_address, uid):
action_date = convert_datestruct_to_datetext(time.localtime())
action_code = CFG_WEBCOMMENT_ACTION_CODE['VOTE']
query = """INSERT INTO cmtACTIONHISTORY (id_cmtRECORDCOMMENT,
id_bibrec, id_user, client_host, action_time,
action_code)
VALUES (%s, NULL ,%s, inet_aton(%s), %s, %s)"""
params = (cmt_id, uid, client_ip_address, action_date, action_code)
run_sql(query, params)
return query_record_useful_review(cmt_id, value)
else:
return 0
def check_user_can_comment(recID, client_ip_address, uid=-1):
""" Check if a user hasn't already commented within the last seconds
time limit: CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS
@param recID: record id
@param client_ip_address: IP => use: str(req.remote_ip)
@param uid: user id, as given by invenio.webuser.getUid(req)
"""
recID = wash_url_argument(recID, 'int')
client_ip_address = wash_url_argument(client_ip_address, 'str')
uid = wash_url_argument(uid, 'int')
max_action_time = time.time() - CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_COMMENTS_IN_SECONDS
max_action_time = convert_datestruct_to_datetext(time.localtime(max_action_time))
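    # Note (illustrative): 'max_action_time' is the oldest timestamp that still
    # falls inside the rate-limiting window; any ADD_COMMENT action recorded
    # after it means this user/IP commented too recently and must wait.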
action_code = CFG_WEBCOMMENT_ACTION_CODE['ADD_COMMENT']
query = """SELECT id_bibrec
FROM cmtACTIONHISTORY
WHERE id_bibrec=%s AND
action_code=%s AND
action_time>%s
"""
params = (recID, action_code, max_action_time)
if uid < 0:
query += " AND client_host=inet_aton(%s)"
params += (client_ip_address,)
else:
query += " AND id_user=%s"
params += (uid,)
res = run_sql(query, params)
return len(res) == 0
def check_user_can_review(recID, client_ip_address, uid=-1):
""" Check if a user hasn't already reviewed within the last seconds
time limit: CFG_WEBCOMMENT_TIMELIMIT_PROCESSING_REVIEWS_IN_SECONDS
@param recID: record ID
@param client_ip_address: IP => use: str(req.remote_ip)
@param uid: user id, as given by invenio.webuser.getUid(req)
"""
action_code = CFG_WEBCOMMENT_ACTION_CODE['ADD_REVIEW']
query = """SELECT id_bibrec
FROM cmtACTIONHISTORY
WHERE id_bibrec=%s AND
action_code=%s
"""
params = (recID, action_code)
if uid < 0:
query += " AND client_host=inet_aton(%s)"
params += (client_ip_address,)
else:
query += " AND id_user=%s"
params += (uid,)
res = run_sql(query, params)
return len(res) == 0
def check_user_can_vote(cmt_id, client_ip_address, uid=-1):
""" Checks if a user hasn't already voted
@param cmt_id: comment id
@param client_ip_address: IP => use: str(req.remote_ip)
@param uid: user id, as given by invenio.webuser.getUid(req)
"""
cmt_id = wash_url_argument(cmt_id, 'int')
client_ip_address = wash_url_argument(client_ip_address, 'str')
uid = wash_url_argument(uid, 'int')
query = """SELECT id_cmtRECORDCOMMENT
FROM cmtACTIONHISTORY
WHERE id_cmtRECORDCOMMENT=%s"""
params = (cmt_id,)
if uid < 0:
query += " AND client_host=inet_aton(%s)"
params += (client_ip_address,)
else:
query += " AND id_user=%s"
params += (uid, )
res = run_sql(query, params)
return (len(res) == 0)
def get_comment_collection(cmt_id):
"""
Extract the collection where the comment is written
"""
query = "SELECT id_bibrec FROM cmtRECORDCOMMENT WHERE id=%s"
recid = run_sql(query, (cmt_id,))
record_primary_collection = guess_primary_collection_of_a_record(recid[0][0])
return record_primary_collection
def get_collection_moderators(collection):
"""
Return the list of comment moderators for the given collection.
"""
from invenio.access_control_engine import acc_get_authorized_emails
res = list(acc_get_authorized_emails('moderatecomments', collection=collection))
if not res:
return [CFG_WEBCOMMENT_DEFAULT_MODERATOR,]
return res
def perform_request_report(cmt_id, client_ip_address, uid=-1):
"""
Report a comment/review for inappropriate content.
Will send an email to the administrator if number of reports is a multiple of CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN
@param cmt_id: comment id
@return: integer 1 if successful, integer 0 if not. -2 if comment does not exist
"""
cmt_id = wash_url_argument(cmt_id, 'int')
if cmt_id <= 0:
return 0
(query_res, nb_abuse_reports) = query_record_report_this(cmt_id)
if query_res == 0:
return 0
elif query_res == -2:
return -2
if not(check_user_can_report(cmt_id, client_ip_address, uid)):
return 0
action_date = convert_datestruct_to_datetext(time.localtime())
action_code = CFG_WEBCOMMENT_ACTION_CODE['REPORT_ABUSE']
query = """INSERT INTO cmtACTIONHISTORY (id_cmtRECORDCOMMENT, id_bibrec,
id_user, client_host, action_time, action_code)
VALUES (%s, NULL, %s, inet_aton(%s), %s, %s)"""
params = (cmt_id, uid, client_ip_address, action_date, action_code)
run_sql(query, params)
if nb_abuse_reports % CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN == 0:
(cmt_id2,
id_bibrec,
id_user,
cmt_body,
cmt_date,
cmt_star,
cmt_vote, cmt_nb_votes_total,
cmt_title,
cmt_reported,
round_name,
restriction) = query_get_comment(cmt_id)
(user_nb_abuse_reports,
user_votes,
user_nb_votes_total) = query_get_user_reports_and_votes(int(id_user))
(nickname, user_email, last_login) = query_get_user_contact_info(id_user)
from_addr = '%s Alert Engine <%s>' % (CFG_SITE_NAME, CFG_WEBALERT_ALERT_ENGINE_EMAIL)
comment_collection = get_comment_collection(cmt_id)
to_addrs = get_collection_moderators(comment_collection)
subject = "A comment has been reported as inappropriate by a user"
body = '''
The following comment has been reported a total of %(cmt_reported)s times.
Author: nickname = %(nickname)s
email = %(user_email)s
user_id = %(uid)s
This user has:
total number of reports = %(user_nb_abuse_reports)s
%(votes)s
Comment: comment_id = %(cmt_id)s
record_id = %(id_bibrec)s
date written = %(cmt_date)s
nb reports = %(cmt_reported)s
%(review_stuff)s
body =
---start body---
%(cmt_body)s
---end body---
Please go to the record page %(comment_admin_link)s to delete this message if necessary. A warning will be sent to the user in question.''' % \
{ 'cfg-report_max' : CFG_WEBCOMMENT_NB_REPORTS_BEFORE_SEND_EMAIL_TO_ADMIN,
'nickname' : nickname,
'user_email' : user_email,
'uid' : id_user,
'user_nb_abuse_reports' : user_nb_abuse_reports,
'user_votes' : user_votes,
'votes' : CFG_WEBCOMMENT_ALLOW_REVIEWS and \
"total number of positive votes\t= %s\n\t\ttotal number of negative votes\t= %s" % \
(user_votes, (user_nb_votes_total - user_votes)) or "\n",
'cmt_id' : cmt_id,
'id_bibrec' : id_bibrec,
'cmt_date' : cmt_date,
'cmt_reported' : cmt_reported,
'review_stuff' : CFG_WEBCOMMENT_ALLOW_REVIEWS and \
"star score\t= %s\n\treview title\t= %s" % (cmt_star, cmt_title) or "",
'cmt_body' : cmt_body,
'comment_admin_link' : CFG_SITE_URL + "/"+ CFG_SITE_RECORD +"/" + str(id_bibrec) + '/comments#' + str(cmt_id),
'user_admin_link' : "user_admin_link" #! FIXME
}
#FIXME to be added to email when websession module is over:
#If you wish to ban the user, you can do so via the User Admin Panel %(user_admin_link)s.
send_email(from_addr, to_addrs, subject, body)
return 1
def check_user_can_report(cmt_id, client_ip_address, uid=-1):
""" Checks if a user hasn't already reported a comment
@param cmt_id: comment id
@param client_ip_address: IP => use: str(req.remote_ip)
@param uid: user id, as given by invenio.webuser.getUid(req)
"""
cmt_id = wash_url_argument(cmt_id, 'int')
client_ip_address = wash_url_argument(client_ip_address, 'str')
uid = wash_url_argument(uid, 'int')
query = """SELECT id_cmtRECORDCOMMENT
FROM cmtACTIONHISTORY
WHERE id_cmtRECORDCOMMENT=%s"""
    params = (cmt_id,)
if uid < 0:
query += " AND client_host=inet_aton(%s)"
params += (client_ip_address,)
else:
query += " AND id_user=%s"
params += (uid,)
res = run_sql(query, params)
return (len(res) == 0)
def query_get_user_contact_info(uid):
"""
Get the user contact information
@return: tuple (nickname, email, last_login), if none found return ()
Note: for the moment, if no nickname, will return email address up to the '@'
"""
query1 = """SELECT nickname, email,
DATE_FORMAT(last_login, '%%Y-%%m-%%d %%H:%%i:%%s')
FROM user WHERE id=%s"""
params1 = (uid,)
res1 = run_sql(query1, params1)
if res1:
return res1[0]
else:
return ()
def query_get_user_reports_and_votes(uid):
"""
Retrieve total number of reports and votes of a particular user
@param uid: user id
@return: tuple (total_nb_reports, total_nb_votes_yes, total_nb_votes_total)
if none found return ()
"""
query1 = """SELECT nb_votes_yes,
nb_votes_total,
nb_abuse_reports
FROM cmtRECORDCOMMENT
WHERE id_user=%s"""
params1 = (uid,)
res1 = run_sql(query1, params1)
if len(res1) == 0:
return ()
nb_votes_yes = nb_votes_total = nb_abuse_reports = 0
for cmt_tuple in res1:
nb_votes_yes += int(cmt_tuple[0])
nb_votes_total += int(cmt_tuple[1])
nb_abuse_reports += int(cmt_tuple[2])
return (nb_abuse_reports, nb_votes_yes, nb_votes_total)
def query_get_comment(comID):
"""
Get all fields of a comment
@param comID: comment id
@return: tuple (comID, id_bibrec, id_user, body, date_creation, star_score, nb_votes_yes, nb_votes_total, title, nb_abuse_reports, round_name, restriction)
if none found return ()
"""
query1 = """SELECT id,
id_bibrec,
id_user,
body,
DATE_FORMAT(date_creation, '%%Y-%%m-%%d %%H:%%i:%%s'),
star_score,
nb_votes_yes,
nb_votes_total,
title,
nb_abuse_reports,
round_name,
restriction
FROM cmtRECORDCOMMENT
WHERE id=%s"""
params1 = (comID,)
res1 = run_sql(query1, params1)
if len(res1)>0:
return res1[0]
else:
return ()
def query_record_report_this(comID):
"""
Increment the number of reports for a comment
@param comID: comment id
@return: tuple (success, new_total_nb_reports_for_this_comment) where
success is integer 1 if success, integer 0 if not, -2 if comment does not exist
"""
#retrieve nb_abuse_reports
query1 = "SELECT nb_abuse_reports FROM cmtRECORDCOMMENT WHERE id=%s"
params1 = (comID,)
res1 = run_sql(query1, params1)
if len(res1) == 0:
return (-2, 0)
#increment and update
nb_abuse_reports = int(res1[0][0]) + 1
query2 = "UPDATE cmtRECORDCOMMENT SET nb_abuse_reports=%s WHERE id=%s"
params2 = (nb_abuse_reports, comID)
res2 = run_sql(query2, params2)
return (int(res2), nb_abuse_reports)
def query_record_useful_review(comID, value):
"""
    Private function
Adjust the number of useful votes and number of total votes for a comment.
@param comID: comment id
@param value: +1 or -1
@return: integer 1 if successful, integer 0 if not
"""
# retrieve nb_useful votes
query1 = "SELECT nb_votes_total, nb_votes_yes FROM cmtRECORDCOMMENT WHERE id=%s"
params1 = (comID,)
res1 = run_sql(query1, params1)
if len(res1)==0:
return 0
# modify and insert new nb_useful votes
nb_votes_yes = int(res1[0][1])
if value >= 1:
nb_votes_yes = int(res1[0][1]) + 1
nb_votes_total = int(res1[0][0]) + 1
query2 = "UPDATE cmtRECORDCOMMENT SET nb_votes_total=%s, nb_votes_yes=%s WHERE id=%s"
params2 = (nb_votes_total, nb_votes_yes, comID)
res2 = run_sql(query2, params2)
return int(res2)
def query_retrieve_comments_or_remarks(recID, display_order='od', display_since='0000-00-00 00:00:00',
ranking=0, limit='all', user_info=None):
"""
Private function
Retrieve tuple of comments or remarks from the database
@param recID: record id
@param display_order: hh = highest helpful score
lh = lowest helpful score
hs = highest star score
ls = lowest star score
od = oldest date
nd = newest date
@param display_since: datetime, e.g. 0000-00-00 00:00:00
@param ranking: boolean, enabled if reviews, disabled for comments
@param limit: number of comments/review to return
@return: tuple of comment where comment is
tuple (nickname, uid, date_creation, body, status, id) if ranking disabled or
tuple (nickname, uid, date_creation, body, status, nb_votes_yes, nb_votes_total, star_score, title, id)
Note: for the moment, if no nickname, will return email address up to '@'
"""
display_since = calculate_start_date(display_since)
order_dict = { 'hh' : "cmt.nb_votes_yes/(cmt.nb_votes_total+1) DESC, cmt.date_creation DESC ",
'lh' : "cmt.nb_votes_yes/(cmt.nb_votes_total+1) ASC, cmt.date_creation ASC ",
'ls' : "cmt.star_score ASC, cmt.date_creation DESC ",
'hs' : "cmt.star_score DESC, cmt.date_creation DESC ",
'nd' : "cmt.reply_order_cached_data DESC ",
'od' : "cmt.reply_order_cached_data ASC "
}
    # Ranking is only used for reviews, and only for internal records
if ranking and recID > 0:
try:
display_order = order_dict[display_order]
except:
display_order = order_dict['od']
else:
        # in case of recID <= 0 => external record => no ranking!
ranking = 0
try:
if display_order[-1] == 'd':
display_order = order_dict[display_order]
else:
display_order = order_dict['od']
except:
display_order = order_dict['od']
#display_order = order_dict['nd']
query = """SELECT user.nickname,
cmt.id_user,
DATE_FORMAT(cmt.date_creation, '%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s'),
cmt.body,
cmt.status,
cmt.nb_abuse_reports,
%(ranking)s cmt.id,
cmt.round_name,
cmt.restriction,
%(reply_to_column)s
FROM cmtRECORDCOMMENT cmt LEFT JOIN user ON
user.id=cmt.id_user
WHERE cmt.id_bibrec=%%s
%(ranking_only)s
%(display_since)s
ORDER BY %(display_order)s
""" % {'ranking' : ranking and ' cmt.nb_votes_yes, cmt.nb_votes_total, cmt.star_score, cmt.title, ' or '',
'ranking_only' : ranking and ' AND cmt.star_score>0 ' or ' AND cmt.star_score=0 ',
# 'id_bibrec' : recID > 0 and 'cmt.id_bibrec' or 'cmt.id_bibrec_or_bskEXTREC',
# 'table' : recID > 0 and 'cmtRECORDCOMMENT' or 'bskRECORDCOMMENT',
'display_since' : display_since == '0000-00-00 00:00:00' and ' ' or 'AND cmt.date_creation>=\'%s\' ' % display_since,
'display_order': display_order,
'reply_to_column': recID > 0 and 'cmt.in_reply_to_id_cmtRECORDCOMMENT' or 'cmt.in_reply_to_id_bskRECORDCOMMENT'}
params = (recID,)
res = run_sql(query, params)
# return res
    # 'limit' may be the string 'all', a digit string or an int
    if str(limit).isdigit():
        new_limit = int(limit)
    else:
        new_limit = None
comments_list = []
for row in res:
if ranking:
# when dealing with reviews, row[12] holds restriction info:
restriction = row[12]
else:
# when dealing with comments, row[8] holds restriction info:
restriction = row[8]
if user_info and check_user_can_view_comment(user_info, None, restriction)[0] != 0:
# User cannot view comment. Look further
continue
comments_list.append(row)
        if new_limit is not None:
            new_limit -= 1
            if new_limit < 1:
                break
if comments_list:
        if str(limit).isdigit():
            return comments_list[:int(limit)]
else:
return comments_list
return ()
## def get_comment_children(comID):
## """
## Returns the list of children (i.e. direct descendants) ordered by time of addition.
## @param comID: the ID of the comment for which we want to retrieve children
## @type comID: int
## @return the list of children
## @rtype: list
## """
## res = run_sql("SELECT id FROM cmtRECORDCOMMENT WHERE in_reply_to_id_cmtRECORDCOMMENT=%s", (comID,))
## return [row[0] for row in res]
## def get_comment_descendants(comID, depth=None):
## """
## Returns the list of descendants of the given comment, orderd from
## oldest to newest ("top-down"), down to depth specified as parameter.
## @param comID: the ID of the comment for which we want to retrieve descendant
## @type comID: int
## @param depth: the max depth down to which we want to retrieve
## descendants. Specify None for no limit, 1 for direct
## children only, etc.
## @return the list of ancestors
## @rtype: list(tuple(comment ID, descendants comments IDs))
## """
## if depth == 0:
## return (comID, [])
## res = run_sql("SELECT id FROM cmtRECORDCOMMENT WHERE in_reply_to_id_cmtRECORDCOMMENT=%s", (comID,))
## if res:
## children_comID = [row[0] for row in res]
## children_descendants = []
## if depth:
## depth -= 1
## children_descendants = [get_comment_descendants(child_comID, depth) for child_comID in children_comID]
## return (comID, children_descendants)
## else:
## return (comID, [])
def get_comment_ancestors(comID, depth=None):
"""
Returns the list of ancestors of the given comment, ordered from
oldest to newest ("top-down": direct parent of comID is at last position),
up to given depth
@param comID: the ID of the comment for which we want to retrieve ancestors
@type comID: int
@param depth: the maximum of levels up from the given comment we
want to retrieve ancestors. None for no limit, 1 for
direct parent only, etc.
@type depth: int
@return the list of ancestors
@rtype: list
"""
if depth == 0:
return []
res = run_sql("SELECT in_reply_to_id_cmtRECORDCOMMENT FROM cmtRECORDCOMMENT WHERE id=%s", (comID,))
if res:
parent_comID = res[0][0]
if parent_comID == 0:
return []
parent_ancestors = []
if depth:
depth -= 1
parent_ancestors = get_comment_ancestors(parent_comID, depth)
parent_ancestors.append(parent_comID)
return parent_ancestors
else:
return []
def get_reply_order_cache_data(comid):
"""
Prepare a representation of the comment ID given as parameter so
that it is suitable for byte ordering in MySQL.
"""
return "%s%s%s%s" % (chr((comid >> 24) % 256), chr((comid >> 16) % 256),
chr((comid >> 8) % 256), chr(comid % 256))
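# Illustrative note: the four chr() bytes above are the comment id packed in
# big-endian form, e.g. comid=1 -> '\x00\x00\x00\x01' and
# comid=258 -> '\x00\x00\x01\x02'. When a reply is inserted below, the
# parent's cached prefix is concatenated with the child's four bytes, so a
# plain byte-wise ORDER BY on reply_order_cached_data yields comments in
# depth-first thread order.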
def query_add_comment_or_remark(reviews=0, recID=0, uid=-1, msg="",
note="", score=0, priority=0,
client_ip_address='', editor_type='textarea',
req=None, reply_to=None, attached_files=None):
"""
Private function
    Insert a comment/review or remark into the database
@param recID: record id
@param uid: user id
@param msg: comment body
@param note: comment title
@param score: review star score
@param priority: remark priority #!FIXME
@param editor_type: the kind of editor used to submit the comment: 'textarea', 'ckeditor'
    @param req: request object. If provided, email notifications are sent after we have replied to the user request.
@param reply_to: the id of the comment we are replying to with this inserted comment.
@return: integer >0 representing id if successful, integer 0 if not
"""
current_date = calculate_start_date('0d')
#change utf-8 message into general unicode
msg = msg.decode('utf-8')
note = note.decode('utf-8')
#change general unicode back to utf-8
msg = msg.encode('utf-8')
note = note.encode('utf-8')
msg_original = msg
(restriction, round_name) = get_record_status(recID)
if attached_files is None:
attached_files = {}
if reply_to and CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH >= 0:
# Check that we have not reached max depth
comment_ancestors = get_comment_ancestors(reply_to)
if len(comment_ancestors) >= CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH:
if CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH == 0:
reply_to = None
else:
reply_to = comment_ancestors[CFG_WEBCOMMENT_MAX_COMMENT_THREAD_DEPTH - 1]
# Inherit restriction and group/round of 'parent'
comment = query_get_comment(reply_to)
if comment:
(round_name, restriction) = comment[10:12]
if editor_type == 'ckeditor':
# Here we remove the line feeds introduced by CKEditor (they
# have no meaning for the user) and replace the HTML line
# breaks by linefeeds, so that we are close to an input that
# would be done without the CKEditor. That's much better if a
# reply to a comment is made with a browser that does not
# support CKEditor.
msg = msg.replace('\n', '').replace('\r', '')
# We clean the quotes that could have been introduced by
# CKEditor when clicking the 'quote' button, as well as those
# that we have introduced when quoting the original message.
# We can however not use directly '>>' chars to quote, as it
# will be washed/fixed when calling tidy_html(): double-escape
            # all &gt; first, and use &gt;&gt;
            msg = msg.replace('&gt;', '&amp;gt;')
            msg = re.sub('<blockquote.*?>\s*<(p|div).*?>', '&gt;&gt;', msg)
msg = re.sub('</(p|div)>\s*</blockquote>', '', msg)
# Then definitely remove any blockquote, whatever it is
msg = re.sub('<blockquote.*?>', '<div>', msg)
msg = re.sub('</blockquote>', '</div>', msg)
# Tidy up the HTML
msg = tidy_html(msg)
            # Now that HTML has been cleaned, unescape &gt;
            msg = msg.replace('&gt;', '>')
            msg = msg.replace('&amp;gt;', '&gt;')
msg = re.sub('<br .*?(/>)', '\n', msg)
            msg = msg.replace('&nbsp;', ' ')
# In case additional <p> or <div> got inserted, interpret
# these as new lines (with a sad trick to do it only once)
# (note that it has been deactivated, as it is messing up
# indentation with >>)
#msg = msg.replace('</div><', '</div>\n<')
#msg = msg.replace('</p><', '</p>\n<')
query = """INSERT INTO cmtRECORDCOMMENT (id_bibrec,
id_user,
body,
date_creation,
star_score,
nb_votes_total,
title,
round_name,
restriction,
in_reply_to_id_cmtRECORDCOMMENT)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
params = (recID, uid, msg, current_date, score, 0, note, round_name, restriction, reply_to or 0)
res = run_sql(query, params)
if res:
new_comid = int(res)
move_attached_files_to_storage(attached_files, recID, new_comid)
parent_reply_order = run_sql("""SELECT reply_order_cached_data from cmtRECORDCOMMENT where id=%s""", (reply_to,))
if not parent_reply_order or parent_reply_order[0][0] is None:
# This is not a reply, but a first 0-level comment
parent_reply_order = ''
else:
parent_reply_order = parent_reply_order[0][0]
run_sql("""UPDATE cmtRECORDCOMMENT SET reply_order_cached_data=%s WHERE id=%s""",
(parent_reply_order + get_reply_order_cache_data(new_comid), new_comid))
action_code = CFG_WEBCOMMENT_ACTION_CODE[reviews and 'ADD_REVIEW' or 'ADD_COMMENT']
action_time = convert_datestruct_to_datetext(time.localtime())
query2 = """INSERT INTO cmtACTIONHISTORY (id_cmtRECORDCOMMENT,
id_bibrec, id_user, client_host, action_time, action_code)
VALUES (%s, %s, %s, inet_aton(%s), %s, %s)"""
params2 = (res, recID, uid, client_ip_address, action_time, action_code)
run_sql(query2, params2)
def notify_subscribers_callback(data):
"""
Define a callback that retrieves subscribed users, and
notify them by email.
@param data: contains the necessary parameters in a tuple:
(recid, uid, comid, msg, note, score, editor_type, reviews)
"""
recid, uid, comid, msg, note, score, editor_type, reviews = data
# Email this comment to 'subscribers'
(subscribers_emails1, subscribers_emails2) = \
get_users_subscribed_to_discussion(recid)
email_subscribers_about_new_comment(recid, reviews=reviews,
emails1=subscribers_emails1,
emails2=subscribers_emails2,
comID=comid, msg=msg,
note=note, score=score,
editor_type=editor_type, uid=uid)
# Register our callback to notify subscribed people after
# having replied to our current user.
data = (recID, uid, res, msg, note, score, editor_type, reviews)
if req:
req.register_cleanup(notify_subscribers_callback, data)
else:
notify_subscribers_callback(data)
return int(res)
def move_attached_files_to_storage(attached_files, recID, comid):
"""
Move the files that were just attached to a new comment to their
final location.
@param attached_files: the mappings of desired filename to attach
and path where to find the original file
@type attached_files: dict {filename, filepath}
@param recID: the record ID to which we attach the files
@param comid: the comment ID to which we attach the files
"""
for filename, filepath in attached_files.iteritems():
os.renames(filepath,
os.path.join(CFG_PREFIX, 'var', 'data', 'comments',
str(recID), str(comid), filename))
def get_attached_files(recid, comid):
"""
Returns a list with tuples (filename, filepath, fileurl)
@param recid: the recid to which the comment belong
@param comid: the commment id for which we want to retrieve files
"""
base_dir = os.path.join(CFG_PREFIX, 'var', 'data', 'comments',
str(recid), str(comid))
if os.path.isdir(base_dir):
filenames = os.listdir(base_dir)
return [(filename, os.path.join(CFG_PREFIX, 'var', 'data', 'comments',
str(recid), str(comid), filename),
CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/' + str(recid) + '/comments/attachments/get/' + str(comid) + '/' + filename) \
for filename in filenames]
else:
return []
def subscribe_user_to_discussion(recID, uid):
"""
    Subscribe a user to a discussion, so that she receives by email
    all new comments posted on this record.
@param recID: record ID corresponding to the discussion we want to
subscribe the user
@param uid: user id
"""
query = """INSERT INTO cmtSUBSCRIPTION (id_bibrec, id_user, creation_time)
VALUES (%s, %s, %s)"""
params = (recID, uid, convert_datestruct_to_datetext(time.localtime()))
try:
run_sql(query, params)
except:
return 0
return 1
def unsubscribe_user_from_discussion(recID, uid):
"""
Unsubscribe users from a discussion.
@param recID: record ID corresponding to the discussion we want to
unsubscribe the user
@param uid: user id
@return 1 if successful, 0 if not
"""
query = """DELETE FROM cmtSUBSCRIPTION
WHERE id_bibrec=%s AND id_user=%s"""
params = (recID, uid)
try:
res = run_sql(query, params)
except:
return 0
if res > 0:
return 1
return 0
def get_user_subscription_to_discussion(recID, uid):
"""
Returns the type of subscription for the given user to this
    discussion. This does not check authorizations (e.g. if the user
was subscribed, but is suddenly no longer authorized).
@param recID: record ID
@param uid: user id
@return:
- 0 if user is not subscribed to discussion
- 1 if user is subscribed, and is allowed to unsubscribe
- 2 if user is subscribed, but cannot unsubscribe
"""
user_email = get_email(uid)
(emails1, emails2) = get_users_subscribed_to_discussion(recID, check_authorizations=False)
if user_email in emails1:
return 1
elif user_email in emails2:
return 2
else:
return 0
def get_users_subscribed_to_discussion(recID, check_authorizations=True):
"""
Returns the lists of users subscribed to a given discussion.
Two lists are returned: the first one is the list of emails for
users who can unsubscribe from the discussion, the second list
    contains the emails of users who cannot unsubscribe (e.g. the author
of the document, etc).
Users appear in only one list. If a user has manually subscribed
    to a discussion AND is an automatic recipient for updates, they
    will only appear in the second list.
@param recID: record ID for which we want to retrieve subscribed users
@param check_authorizations: if True, check again if users are authorized to view comment
@return tuple (emails1, emails2)
"""
subscribers_emails = {}
# Get users that have subscribed to this discussion
query = """SELECT id_user FROM cmtSUBSCRIPTION WHERE id_bibrec=%s"""
params = (recID,)
res = run_sql(query, params)
for row in res:
uid = row[0]
if check_authorizations:
user_info = collect_user_info(uid)
(auth_code, auth_msg) = check_user_can_view_comments(user_info, recID)
else:
# Don't check and grant access
auth_code = False
if auth_code:
# User is no longer authorized to view comments.
# Delete subscription
unsubscribe_user_from_discussion(recID, uid)
else:
email = get_email(uid)
if '@' in email:
subscribers_emails[email] = True
# Get users automatically subscribed, based on the record metadata
collections_with_auto_replies = CFG_WEBCOMMENT_EMAIL_REPLIES_TO.keys()
for collection in collections_with_auto_replies:
if (get_colID(collection) is not None) and \
(recID in get_collection_reclist(collection)):
fields = CFG_WEBCOMMENT_EMAIL_REPLIES_TO[collection]
for field in fields:
emails = get_fieldvalues(recID, field)
for email in emails:
if not '@' in email:
# Is a group: add domain name
subscribers_emails[email + '@' + \
CFG_SITE_SUPPORT_EMAIL.split('@')[1]] = False
else:
subscribers_emails[email] = False
return ([email for email, can_unsubscribe_p \
in subscribers_emails.iteritems() if can_unsubscribe_p],
[email for email, can_unsubscribe_p \
in subscribers_emails.iteritems() if not can_unsubscribe_p] )
def email_subscribers_about_new_comment(recID, reviews, emails1,
emails2, comID, msg="",
note="", score=0,
editor_type='textarea',
ln=CFG_SITE_LANG, uid=-1):
"""
Notify subscribers that a new comment was posted.
FIXME: consider recipient preference to send email in correct language.
@param recID: record id
@param emails1: list of emails for users who can unsubscribe from discussion
@param emails2: list of emails for users who cannot unsubscribe from discussion
@param comID: the comment id
@param msg: comment body
@param note: comment title
@param score: review star score
@param editor_type: the kind of editor used to submit the comment: 'textarea', 'ckeditor'
@rtype: bool
@return: True if email was sent okay, False if it was not.
"""
_ = gettext_set_language(ln)
if not emails1 and not emails2:
return 0
# Get title
titles = get_fieldvalues(recID, "245__a")
if not titles:
# usual title not found, try conference title:
titles = get_fieldvalues(recID, "111__a")
title = ''
if titles:
title = titles[0]
else:
title = _("Record %i") % recID
# Get report number
report_numbers = get_fieldvalues(recID, "037__a")
if not report_numbers:
report_numbers = get_fieldvalues(recID, "088__a")
if not report_numbers:
report_numbers = get_fieldvalues(recID, "021__a")
# Prepare email subject and body
if reviews:
email_subject = _('%(report_number)s"%(title)s" has been reviewed') % \
{'report_number': report_numbers and ('[' + report_numbers[0] + '] ') or '',
'title': title}
else:
email_subject = _('%(report_number)s"%(title)s" has been commented') % \
{'report_number': report_numbers and ('[' + report_numbers[0] + '] ') or '',
'title': title}
washer = EmailWasher()
msg = washer.wash(msg)
msg = msg.replace('>>', '>')
email_content = msg
if note:
email_content = note + email_content
# Send emails to people who can unsubscribe
email_header = webcomment_templates.tmpl_email_new_comment_header(recID,
title,
reviews,
comID,
report_numbers,
can_unsubscribe=True,
ln=ln,
uid=uid)
email_footer = webcomment_templates.tmpl_email_new_comment_footer(recID,
title,
reviews,
comID,
report_numbers,
can_unsubscribe=True,
ln=ln)
res1 = True
if emails1:
res1 = send_email(fromaddr=CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL,
toaddr=emails1,
subject=email_subject,
content=email_content,
header=email_header,
footer=email_footer,
ln=ln)
# Then send email to people who have been automatically
# subscribed to the discussion (they cannot unsubscribe)
email_header = webcomment_templates.tmpl_email_new_comment_header(recID,
title,
reviews,
comID,
report_numbers,
can_unsubscribe=False,
ln=ln,
uid=uid)
email_footer = webcomment_templates.tmpl_email_new_comment_footer(recID,
title,
reviews,
comID,
report_numbers,
can_unsubscribe=False,
ln=ln)
res2 = True
if emails2:
res2 = send_email(fromaddr=CFG_WEBCOMMENT_ALERT_ENGINE_EMAIL,
toaddr=emails2,
subject=email_subject,
content=email_content,
header=email_header,
footer=email_footer,
ln=ln)
return res1 and res2
def get_record_status(recid):
"""
Returns the current status of the record, i.e. current restriction to apply for newly submitted
comments, and current commenting round.
The restriction to apply can be found in the record metadata, in
field(s) defined by config CFG_WEBCOMMENT_RESTRICTION_DATAFIELD. The restriction is empty string ""
    in cases where the restriction has not explicitly been set, even
if the record itself is restricted.
@param recid: the record id
@type recid: int
@return tuple(restriction, round_name), where 'restriction' is empty string when no restriction applies
    @rtype (string, string)
"""
collections_with_rounds = CFG_WEBCOMMENT_ROUND_DATAFIELD.keys()
commenting_round = ""
for collection in collections_with_rounds:
        # Find the first collection that defines the rounds field for this
# record
if get_colID(collection) is not None and \
(recid in get_collection_reclist(collection)):
commenting_rounds = get_fieldvalues(recid, CFG_WEBCOMMENT_ROUND_DATAFIELD.get(collection, ""))
if commenting_rounds:
commenting_round = commenting_rounds[0]
break
collections_with_restrictions = CFG_WEBCOMMENT_RESTRICTION_DATAFIELD.keys()
restriction = ""
for collection in collections_with_restrictions:
# Find the first collection that defines restriction field for
# this record
if get_colID(collection) is not None and \
recid in get_collection_reclist(collection):
restrictions = get_fieldvalues(recid, CFG_WEBCOMMENT_RESTRICTION_DATAFIELD.get(collection, ""))
if restrictions:
restriction = restrictions[0]
break
return (restriction, commenting_round)
def calculate_start_date(display_since):
"""
Private function
Returns the datetime of display_since argument in MYSQL datetime format
calculated according to the local time.
    @param display_since: 'all' = no filtering
nd = n days ago
nw = n weeks ago
nm = n months ago
ny = n years ago
                              where n is a number
@return: string of wanted datetime.
If 'all' given as argument, will return datetext_default
datetext_default is defined in miscutils/lib/dateutils and
equals 0000-00-00 00:00:00 => MySQL format
    If bad argument given, will return datetext_default
If library 'dateutil' is not found return datetext_default
and register exception.
"""
time_types = {'d':0, 'w':0, 'm':0, 'y':0}
today = datetime.today()
try:
nb = int(display_since[:-1])
except:
return datetext_default
if display_since in [None, 'all']:
return datetext_default
if str(display_since[-1]) in time_types:
time_type = str(display_since[-1])
else:
return datetext_default
# year
if time_type == 'y':
if (int(display_since[:-1]) > today.year - 1) or (int(display_since[:-1]) < 1):
            # valid range: 1 <= nb <= (current year - 1)
return datetext_default
else:
final_nb_year = today.year - nb
yesterday = today.replace(year=final_nb_year)
# month
elif time_type == 'm':
try:
from dateutil.relativedelta import relativedelta
except ImportError:
# The dateutil library is only recommended: if not
# available, then send warning about this.
register_exception(alert_admin=True)
return datetext_default
# obtain only the date: yyyy-mm-dd
date_today = datetime.now().date()
final_date = date_today - relativedelta(months=nb)
yesterday = today.replace(year=final_date.year, month=final_date.month, day=final_date.day)
# week
elif time_type == 'w':
delta = timedelta(weeks=nb)
yesterday = today - delta
# day
elif time_type == 'd':
delta = timedelta(days=nb)
yesterday = today - delta
return yesterday.strftime("%Y-%m-%d %H:%M:%S")
def get_first_comments_or_remarks(recID=-1,
ln=CFG_SITE_LANG,
nb_comments='all',
nb_reviews='all',
voted=-1,
reported=-1,
user_info=None):
"""
    Gets the first nb comments/reviews or remarks.
In the case of comments, will get both comments and reviews
Comments and remarks sorted by most recent date, reviews sorted by highest helpful score
@param recID: record id
@param ln: language
@param nb_comments: number of comment or remarks to get
@param nb_reviews: number of reviews or remarks to get
@param voted: 1 if user has voted for a remark
@param reported: 1 if user has reported a comment or review
@return: if comment, tuple (comments, reviews) both being html of first nb comments/reviews
             if remark, tuple (remarks, None)
"""
_ = gettext_set_language(ln)
warnings = []
voted = wash_url_argument(voted, 'int')
reported = wash_url_argument(reported, 'int')
## check recID argument
if type(recID) is not int:
return ()
if recID >= 1: #comment or review. NB: suppressed reference to basket (handled in webbasket)
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
res_reviews = query_retrieve_comments_or_remarks(recID=recID, display_order="hh", ranking=1,
                                                              limit=nb_reviews, user_info=user_info)
nb_res_reviews = len(res_reviews)
## check nb argument
if type(nb_reviews) is int and nb_reviews < len(res_reviews):
first_res_reviews = res_reviews[:nb_reviews]
else:
first_res_reviews = res_reviews
if CFG_WEBCOMMENT_ALLOW_COMMENTS:
res_comments = query_retrieve_comments_or_remarks(recID=recID, display_order="od", ranking=0,
                                                               limit=nb_comments, user_info=user_info)
nb_res_comments = len(res_comments)
## check nb argument
if type(nb_comments) is int and nb_comments < len(res_comments):
first_res_comments = res_comments[:nb_comments]
else:
first_res_comments = res_comments
else: #error
try:
raise InvenioWebCommentError(_('%s is an invalid record ID') % recID)
except InvenioWebCommentError, exc:
register_exception()
body = webcomment_templates.tmpl_error(exc.message, ln)
return body
#errors.append(('ERR_WEBCOMMENT_RECID_INVALID', recID)) #!FIXME dont return error anywhere since search page
# comment
if recID >= 1:
comments = reviews = ""
if reported > 0:
try:
raise InvenioWebCommentWarning(_('Your feedback has been recorded, many thanks.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warnings.append((exc.message, 'green'))
#warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED_GREEN_TEXT',))
elif reported == 0:
try:
raise InvenioWebCommentWarning(_('Your feedback could not be recorded, please try again.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_FEEDBACK_NOT_RECORDED_RED_TEXT',))
if CFG_WEBCOMMENT_ALLOW_COMMENTS: # normal comments
grouped_comments = group_comments_by_round(first_res_comments, ranking=0)
comments = webcomment_templates.tmpl_get_first_comments_without_ranking(recID, ln, grouped_comments, nb_res_comments, warnings)
if CFG_WEBCOMMENT_ALLOW_REVIEWS: # ranked comments
#calculate average score
avg_score = calculate_avg_score(res_reviews)
if voted > 0:
try:
raise InvenioWebCommentWarning(_('Your feedback has been recorded, many thanks.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warnings.append((exc.message, 'green'))
#warnings.append(('WRN_WEBCOMMENT_FEEDBACK_RECORDED_GREEN_TEXT',))
elif voted == 0:
try:
raise InvenioWebCommentWarning(_('Your feedback could not be recorded, please try again.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_FEEDBACK_NOT_RECORDED_RED_TEXT',))
grouped_reviews = group_comments_by_round(first_res_reviews, ranking=0)
reviews = webcomment_templates.tmpl_get_first_comments_with_ranking(recID, ln, grouped_reviews, nb_res_reviews, avg_score, warnings)
return (comments, reviews)
# remark
else:
return(webcomment_templates.tmpl_get_first_remarks(first_res_comments, ln, nb_res_comments), None)
def group_comments_by_round(comments, ranking=0):
"""
Group comments by the round to which they belong
"""
comment_rounds = {}
ordered_comment_round_names = []
for comment in comments:
comment_round_name = ranking and comment[11] or comment[7]
if not comment_rounds.has_key(comment_round_name):
comment_rounds[comment_round_name] = []
ordered_comment_round_names.append(comment_round_name)
comment_rounds[comment_round_name].append(comment)
return [(comment_round_name, comment_rounds[comment_round_name]) \
for comment_round_name in ordered_comment_round_names]
def calculate_avg_score(res):
"""
private function
Calculate the avg score of reviews present in res
@param res: tuple of tuple returned from query_retrieve_comments_or_remarks
@return: a float of the average score rounded to the closest 0.5
"""
c_star_score = 6
avg_score = 0.0
nb_reviews = 0
for comment in res:
if comment[c_star_score] > 0:
avg_score += comment[c_star_score]
nb_reviews += 1
if nb_reviews == 0:
return 0.0
avg_score = avg_score / nb_reviews
avg_score_unit = avg_score - math.floor(avg_score)
if avg_score_unit < 0.25:
avg_score = math.floor(avg_score)
elif avg_score_unit > 0.75:
avg_score = math.floor(avg_score) + 1
else:
avg_score = math.floor(avg_score) + 0.5
if avg_score > 5:
avg_score = 5.0
return avg_score
def perform_request_add_comment_or_remark(recID=0,
uid=-1,
action='DISPLAY',
ln=CFG_SITE_LANG,
msg=None,
score=None,
note=None,
priority=None,
reviews=0,
comID=0,
client_ip_address=None,
editor_type='textarea',
can_attach_files=False,
subscribe=False,
req=None,
attached_files=None,
warnings=None):
"""
Add a comment/review or remark
@param recID: record id
@param uid: user id
@param action: 'DISPLAY' to display add form
'SUBMIT' to submit comment once form is filled
'REPLY' to reply to an existing comment
@param ln: language
@param msg: the body of the comment/review or remark
@param score: star score of the review
@param note: title of the review
@param priority: priority of remark (int)
@param reviews: boolean, if enabled will add a review, if disabled will add a comment
@param comID: if replying, this is the comment id of the comment we are replying to
@param editor_type: the kind of editor/input used for the comment: 'textarea', 'ckeditor'
@param can_attach_files: if user can attach files to comments or not
@param subscribe: if True, subscribe user to receive new comments by email
@param req: request object. Used to register callback to send email notification
@param attached_files: newly attached files to this comment, mapping filename to filepath
@type attached_files: dict
@param warnings: list of warning tuples (warning_text, warning_color) that should be considered
@return:
- html add form if action is display or reply
- html successful added form if action is submit
"""
_ = gettext_set_language(ln)
if warnings is None:
warnings = []
actions = ['DISPLAY', 'REPLY', 'SUBMIT']
## check arguments
check_recID_is_in_range(recID, warnings, ln)
if uid <= 0:
try:
raise InvenioWebCommentError(_('%s is an invalid user ID.') % uid)
except InvenioWebCommentError, exc:
register_exception()
body = webcomment_templates.tmpl_error(exc.message, ln)
return body
#errors.append(('ERR_WEBCOMMENT_UID_INVALID', uid))
return ''
if attached_files is None:
attached_files = {}
user_contact_info = query_get_user_contact_info(uid)
nickname = ''
if user_contact_info:
if user_contact_info[0]:
nickname = user_contact_info[0]
# show the form
if action == 'DISPLAY':
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
return webcomment_templates.tmpl_add_comment_form_with_ranking(recID, uid, nickname, ln, msg, score, note, warnings, can_attach_files=can_attach_files)
elif not reviews and CFG_WEBCOMMENT_ALLOW_COMMENTS:
return webcomment_templates.tmpl_add_comment_form(recID, uid, nickname, ln, msg, warnings, can_attach_files=can_attach_files)
else:
try:
raise InvenioWebCommentError(_('Comments on records have been disallowed by the administrator.'))
except InvenioWebCommentError, exc:
register_exception(req=req)
body = webcomment_templates.tmpl_error(exc.message, ln)
return body
#errors.append(('ERR_WEBCOMMENT_COMMENTS_NOT_ALLOWED',))
elif action == 'REPLY':
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
try:
raise InvenioWebCommentError(_('Cannot reply to a review.'))
except InvenioWebCommentError, exc:
register_exception(req=req)
body = webcomment_templates.tmpl_error(exc.message, ln)
return body
#errors.append(('ERR_WEBCOMMENT_REPLY_REVIEW',))
return webcomment_templates.tmpl_add_comment_form_with_ranking(recID, uid, nickname, ln, msg, score, note, warnings, can_attach_files=can_attach_files)
elif not reviews and CFG_WEBCOMMENT_ALLOW_COMMENTS:
textual_msg = msg
if comID > 0:
comment = query_get_comment(comID)
if comment:
user_info = get_user_info(comment[2])
if user_info:
date_creation = convert_datetext_to_dategui(str(comment[4]))
# Build two msg: one mostly textual, the other one with HTML markup, for the CkEditor.
msg = _("%(x_name)s wrote on %(x_date)s:")% {'x_name': user_info[2], 'x_date': date_creation}
textual_msg = msg
# 1 For CkEditor input
msg += '\n\n'
msg += comment[3]
msg = email_quote_txt(text=msg)
# Now that we have a text-quoted version, transform into
# something that CkEditor likes, using <blockquote> that
# do still enable users to insert comments inline
msg = email_quoted_txt2html(text=msg,
indent_html=('<blockquote><div>', ' </div></blockquote>'),
linebreak_html=" <br/>",
indent_block=False)
# Add some space for users to easily add text
# around the quoted message
msg = '<br/>' + msg + '<br/>'
# Due to how things are done, we need to
# escape the whole msg again for the editor
msg = cgi.escape(msg)
# 2 For textarea input
textual_msg += "\n\n"
textual_msg += comment[3]
textual_msg = email_quote_txt(text=textual_msg)
return webcomment_templates.tmpl_add_comment_form(recID, uid, nickname, ln, msg, warnings, textual_msg, can_attach_files=can_attach_files, reply_to=comID)
else:
try:
raise InvenioWebCommentError(_('Comments on records have been disallowed by the administrator.'))
except InvenioWebCommentError, exc:
register_exception(req=req)
body = webcomment_templates.tmpl_error(exc.message, ln)
return body
#errors.append(('ERR_WEBCOMMENT_COMMENTS_NOT_ALLOWED',))
# check before submitting form
elif action == 'SUBMIT':
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
if note.strip() in ["", "None"] and not CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS:
try:
raise InvenioWebCommentWarning(_('You must enter a title.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_ADD_NO_TITLE',))
if score == 0 or score > 5:
try:
raise InvenioWebCommentWarning(_('You must choose a score.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(("WRN_WEBCOMMENT_ADD_NO_SCORE",))
if msg.strip() in ["", "None"] and not CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS:
try:
raise InvenioWebCommentWarning(_('You must enter a text.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_ADD_NO_BODY',))
# if no warnings, submit
if len(warnings) == 0:
if reviews:
if check_user_can_review(recID, client_ip_address, uid):
success = query_add_comment_or_remark(reviews, recID=recID, uid=uid, msg=msg,
note=note, score=score, priority=0,
client_ip_address=client_ip_address,
editor_type=editor_type,
req=req,
reply_to=comID)
else:
try:
raise InvenioWebCommentWarning(_('You already wrote a review for this record.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append('WRN_WEBCOMMENT_CANNOT_REVIEW_TWICE')
success = 1
else:
if check_user_can_comment(recID, client_ip_address, uid):
success = query_add_comment_or_remark(reviews, recID=recID, uid=uid, msg=msg,
note=note, score=score, priority=0,
client_ip_address=client_ip_address,
editor_type=editor_type,
req=req,
reply_to=comID, attached_files=attached_files)
if success > 0 and subscribe:
subscribe_user_to_discussion(recID, uid)
else:
try:
                        raise InvenioWebCommentWarning(_('You already posted a comment a short while ago. Please retry later.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append('WRN_WEBCOMMENT_TIMELIMIT')
success = 1
if success > 0:
if CFG_WEBCOMMENT_ADMIN_NOTIFICATION_LEVEL > 0:
notify_admin_of_new_comment(comID=success)
return webcomment_templates.tmpl_add_comment_successful(recID, ln, reviews, warnings, success)
else:
try:
raise InvenioWebCommentError(_('Failed to insert your comment to the database. Please try again.'))
except InvenioWebCommentError, exc:
register_exception(req=req)
body = webcomment_templates.tmpl_error(exc.message, ln)
return body
#errors.append(('ERR_WEBCOMMENT_DB_INSERT_ERROR'))
# if are warnings or if inserting comment failed, show user where warnings are
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
return webcomment_templates.tmpl_add_comment_form_with_ranking(recID, uid, nickname, ln, msg, score, note, warnings, can_attach_files=can_attach_files)
else:
return webcomment_templates.tmpl_add_comment_form(recID, uid, nickname, ln, msg, warnings, can_attach_files=can_attach_files)
# unknown action send to display
else:
try:
raise InvenioWebCommentWarning(_('Unknown action --> showing you the default add comment form.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning', req=req)
warnings.append((exc.message, ''))
#warnings.append(('WRN_WEBCOMMENT_ADD_UNKNOWN_ACTION',))
if reviews and CFG_WEBCOMMENT_ALLOW_REVIEWS:
            return webcomment_templates.tmpl_add_comment_form_with_ranking(recID, uid, nickname, ln, msg, score, note, warnings, can_attach_files=can_attach_files)
else:
            return webcomment_templates.tmpl_add_comment_form(recID, uid, nickname, ln, msg, warnings, can_attach_files=can_attach_files)
return ''
def notify_admin_of_new_comment(comID):
"""
Sends an email to the admin with details regarding comment with ID = comID
"""
comment = query_get_comment(comID)
if len(comment) > 0:
(comID2,
id_bibrec,
id_user,
body,
date_creation,
star_score, nb_votes_yes, nb_votes_total,
title,
nb_abuse_reports, round_name, restriction) = comment
else:
return
user_info = query_get_user_contact_info(id_user)
if len(user_info) > 0:
(nickname, email, last_login) = user_info
if not len(nickname) > 0:
nickname = email.split('@')[0]
else:
nickname = email = last_login = "ERROR: Could not retrieve"
review_stuff = '''
Star score = %s
Title = %s''' % (star_score, title)
washer = EmailWasher()
try:
body = washer.wash(body)
except:
body = cgi.escape(body)
record_info = webcomment_templates.tmpl_email_new_comment_admin(id_bibrec)
out = '''
The following %(comment_or_review)s has just been posted (%(date)s).
AUTHOR:
Nickname = %(nickname)s
Email = %(email)s
User ID = %(uid)s
RECORD CONCERNED:
Record ID = %(recID)s
URL = <%(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(comments_or_reviews)s/>
%(record_details)s
%(comment_or_review_caps)s:
%(comment_or_review)s ID = %(comID)s %(review_stuff)s
Body =
<--------------->
%(body)s
<--------------->
ADMIN OPTIONS:
To moderate the %(comment_or_review)s go to %(siteurl)s/%(CFG_SITE_RECORD)s/%(recID)s/%(comments_or_reviews)s/display?%(arguments)s
''' % \
{ 'comment_or_review' : star_score > 0 and 'review' or 'comment',
'comment_or_review_caps': star_score > 0 and 'REVIEW' or 'COMMENT',
'comments_or_reviews' : star_score > 0 and 'reviews' or 'comments',
'date' : date_creation,
'nickname' : nickname,
'email' : email,
'uid' : id_user,
'recID' : id_bibrec,
'record_details' : record_info,
'comID' : comID2,
'review_stuff' : star_score > 0 and review_stuff or "",
'body' : body.replace('<br />','\n'),
'siteurl' : CFG_SITE_URL,
'CFG_SITE_RECORD' : CFG_SITE_RECORD,
'arguments' : 'ln=en&do=od#%s' % comID
}
from_addr = '%s WebComment <%s>' % (CFG_SITE_NAME, CFG_WEBALERT_ALERT_ENGINE_EMAIL)
comment_collection = get_comment_collection(comID)
to_addrs = get_collection_moderators(comment_collection)
rec_collection = guess_primary_collection_of_a_record(id_bibrec)
report_nums = get_fieldvalues(id_bibrec, "037__a")
report_nums += get_fieldvalues(id_bibrec, "088__a")
report_nums = ', '.join(report_nums)
subject = "A new comment/review has just been posted [%s|%s]" % (rec_collection, report_nums)
send_email(from_addr, to_addrs, subject, out)
def check_recID_is_in_range(recID, warnings=[], ln=CFG_SITE_LANG):
"""
Check that recID is >= 0
@param recID: record id
@param warnings: list of warning tuples (warning_text, warning_color)
@return: tuple (boolean, html) where boolean (1=true, 0=false)
and html is the body of the page to display if there was a problem
"""
_ = gettext_set_language(ln)
try:
recID = int(recID)
except:
pass
if type(recID) is int:
if recID > 0:
from invenio.search_engine import record_exists
success = record_exists(recID)
if success == 1:
return (1,"")
else:
try:
raise InvenioWebCommentWarning(_('Record ID %s does not exist in the database.') % recID)
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warnings.append((exc.message, ''))
#warnings.append(('ERR_WEBCOMMENT_RECID_INEXISTANT', recID))
return (0, webcomment_templates.tmpl_record_not_found(status='inexistant', recID=recID, ln=ln))
elif recID == 0:
try:
raise InvenioWebCommentWarning(_('No record ID was given.'))
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warnings.append((exc.message, ''))
#warnings.append(('ERR_WEBCOMMENT_RECID_MISSING',))
return (0, webcomment_templates.tmpl_record_not_found(status='missing', recID=recID, ln=ln))
else:
try:
raise InvenioWebCommentWarning(_('Record ID %s is an invalid ID.') % recID)
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warnings.append((exc.message, ''))
#warnings.append(('ERR_WEBCOMMENT_RECID_INVALID', recID))
return (0, webcomment_templates.tmpl_record_not_found(status='invalid', recID=recID, ln=ln))
else:
try:
raise InvenioWebCommentWarning(_('Record ID %s is not a number.') % recID)
except InvenioWebCommentWarning, exc:
register_exception(stream='warning')
warnings.append((exc.message, ''))
#warnings.append(('ERR_WEBCOMMENT_RECID_NAN', recID))
return (0, webcomment_templates.tmpl_record_not_found(status='nan', recID=recID, ln=ln))
def check_int_arg_is_in_range(value, name, gte_value, lte_value=None):
"""
Check that variable with name 'name' >= gte_value and optionally <= lte_value
@param value: variable value
@param name: variable name
@param gte_value: greater than or equal to value
@param lte_value: less than or equal to value
@return: boolean (1=true, 0=false)
"""
if type(value) is not int:
try:
raise InvenioWebCommentError('%s is not a number.' % value)
except InvenioWebCommentError, exc:
register_exception()
body = webcomment_templates.tmpl_error(exc.message)
return body
#errors.append(('ERR_WEBCOMMENT_ARGUMENT_NAN', value))
return 0
if value < gte_value:
try:
raise InvenioWebCommentError('%s invalid argument.' % value)
except InvenioWebCommentError, exc:
register_exception()
body = webcomment_templates.tmpl_error(exc.message)
return body
#errors.append(('ERR_WEBCOMMENT_ARGUMENT_INVALID', value))
return 0
if lte_value:
if value > lte_value:
try:
raise InvenioWebCommentError('%s invalid argument.' % value)
except InvenioWebCommentError, exc:
register_exception()
body = webcomment_templates.tmpl_error(exc.message)
return body
#errors.append(('ERR_WEBCOMMENT_ARGUMENT_INVALID', value))
return 0
return 1
def get_mini_reviews(recid, ln=CFG_SITE_LANG):
"""
Returns the web controls to add reviews to a record from the
detailed record pages mini-panel.
@param recid: the id of the displayed record
@param ln: the user's language
"""
if CFG_WEBCOMMENT_ALLOW_SHORT_REVIEWS:
action = 'SUBMIT'
else:
action = 'DISPLAY'
reviews = query_retrieve_comments_or_remarks(recid, ranking=1)
return webcomment_templates.tmpl_mini_review(recid, ln, action=action,
avg_score=calculate_avg_score(reviews),
nb_comments_total=len(reviews))
def check_user_can_view_comments(user_info, recid):
"""Check if the user is authorized to view comments for given
recid.
Returns the same type as acc_authorize_action
"""
# Check user can view the record itself first
(auth_code, auth_msg) = check_user_can_view_record(user_info, recid)
if auth_code:
return (auth_code, auth_msg)
# Check if user can view the comments
## But first can we find an authorization for this case action,
## for this collection?
record_primary_collection = guess_primary_collection_of_a_record(recid)
return acc_authorize_action(user_info, 'viewcomment', authorized_if_no_roles=True, collection=record_primary_collection)
def check_user_can_view_comment(user_info, comid, restriction=None):
"""Check if the user is authorized to view a particular comment,
given the comment restriction. Note that this function does not
check if the record itself is restricted to the user, which would
mean that the user should not see the comment.
You can omit 'comid' if you already know the 'restriction'
@param user_info: the user info object
@param comid: the comment id of that we want to check
@param restriction: the restriction applied to given comment (if known. Otherwise retrieved automatically)
@return: the same type as acc_authorize_action
"""
if restriction is None:
comment = query_get_comment(comid)
if comment:
restriction = comment[11]
else:
return (1, 'Comment %i does not exist' % comid)
if restriction == "":
return (0, '')
return acc_authorize_action(user_info, 'viewrestrcomment', status=restriction)
def check_user_can_send_comments(user_info, recid):
"""Check if the user is authorized to comment the given
recid. This function does not check that user can view the record
or view the comments
Returns the same type as acc_authorize_action
"""
## First can we find an authorization for this case, action + collection
record_primary_collection = guess_primary_collection_of_a_record(recid)
return acc_authorize_action(user_info, 'sendcomment', authorized_if_no_roles=True, collection=record_primary_collection)
def check_user_can_attach_file_to_comments(user_info, recid):
"""Check if the user is authorized to attach a file to comments
for given recid. This function does not check that user can view
the comments or send comments.
Returns the same type as acc_authorize_action
"""
## First can we find an authorization for this case action, for
## this collection?
record_primary_collection = guess_primary_collection_of_a_record(recid)
return acc_authorize_action(user_info, 'attachcommentfile', authorized_if_no_roles=False, collection=record_primary_collection)
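# Illustrative sketch (not part of the original module): the permission helpers
# above are intended to be combined in order -- record visibility first, then the
# collection-level 'viewcomment' action, then the per-comment restriction. The
# names 'user_info', 'recid' and 'comment_rows' below are hypothetical.
#
#   (auth_code, auth_msg) = check_user_can_view_comments(user_info, recid)
#   if auth_code == 0:
#       for (comid, restriction) in comment_rows:
#           if check_user_can_view_comment(user_info, comid, restriction)[0] == 0:
#               pass  # this comment may be displayed to the user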
| jmartinm/InvenioAuthorLists | modules/webcomment/lib/webcomment.py | Python | gpl-2.0 | 89,699 |
from forms import Form
from django.core.exceptions import ValidationError
from django.utils.encoding import StrAndUnicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from fields import IntegerField, BooleanField
from widgets import Media, HiddenInput
from util import ErrorList
__all__ = ('BaseFormSet', 'all_valid')
# special field names
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
class ManagementForm(Form):
"""
``ManagementForm`` is used to keep track of how many form instances
are displayed on the page. If adding new forms via javascript, you should
increment the count field of this form as well.
"""
def __init__(self, *args, **kwargs):
self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
super(ManagementForm, self).__init__(*args, **kwargs)
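# Illustrative note (not part of Django's source): a formset with prefix 'form'
# renders this management form as hidden inputs named 'form-TOTAL_FORMS',
# 'form-INITIAL_FORMS' and 'form-MAX_NUM_FORMS'. When binding data manually the
# same keys must be present, e.g. (hypothetical data dict):
#
#   data = {'form-TOTAL_FORMS': u'1', 'form-INITIAL_FORMS': u'0',
#           'form-MAX_NUM_FORMS': u''}
#   formset = ArticleFormSet(data)   # 'ArticleFormSet' built via formset_factory below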
class BaseFormSet(StrAndUnicode):
"""
A collection of instances of the same Form class.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
initial=None, error_class=ErrorList):
self.is_bound = data is not None or files is not None
self.prefix = prefix or self.get_default_prefix()
self.auto_id = auto_id
self.data = data
self.files = files
self.initial = initial
self.error_class = error_class
self._errors = None
self._non_form_errors = None
# construct the forms in the formset
self._construct_forms()
def __unicode__(self):
return self.as_table()
def _management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.data or self.files:
form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
if not form.is_valid():
raise ValidationError('ManagementForm data is missing or has been tampered with')
else:
form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MAX_NUM_FORM_COUNT: self.max_num
})
return form
management_form = property(_management_form)
def total_form_count(self):
"""Returns the total number of forms in this FormSet."""
if self.data or self.files:
return self.management_form.cleaned_data[TOTAL_FORM_COUNT]
else:
initial_forms = self.initial_form_count()
total_forms = initial_forms + self.extra
# Allow all existing related objects/inlines to be displayed,
# but don't allow extra beyond max_num.
if initial_forms > self.max_num >= 0:
total_forms = initial_forms
elif total_forms > self.max_num >= 0:
total_forms = self.max_num
return total_forms
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if self.data or self.files:
return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
else:
            # Use the length of the initial data if it's there, 0 otherwise.
initial_forms = self.initial and len(self.initial) or 0
if initial_forms > self.max_num >= 0:
initial_forms = self.max_num
return initial_forms
def _construct_forms(self):
# instantiate all the forms and put them in self.forms
self.forms = []
for i in xrange(self.total_form_count()):
self.forms.append(self._construct_form(i))
def _construct_form(self, i, **kwargs):
"""
Instantiates and returns the i-th form instance in a formset.
"""
defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i)}
if self.data or self.files:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial:
try:
defaults['initial'] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty.
if i >= self.initial_form_count():
defaults['empty_permitted'] = True
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, i)
return form
def _get_initial_forms(self):
"""Return a list of all the initial forms in this formset."""
return self.forms[:self.initial_form_count()]
initial_forms = property(_get_initial_forms)
def _get_extra_forms(self):
"""Return a list of all the extra forms in this formset."""
return self.forms[self.initial_form_count():]
extra_forms = property(_get_extra_forms)
def _get_empty_form(self, **kwargs):
defaults = {
'auto_id': self.auto_id,
'prefix': self.add_prefix('__prefix__'),
'empty_permitted': True,
}
if self.data or self.files:
defaults['data'] = self.data
defaults['files'] = self.files
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, None)
return form
empty_form = property(_get_empty_form)
# Maybe this should just go away?
def _get_cleaned_data(self):
"""
Returns a list of form.cleaned_data dicts for every form in self.forms.
"""
if not self.is_valid():
raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
return [form.cleaned_data for form in self.forms]
cleaned_data = property(_get_cleaned_data)
def _get_deleted_forms(self):
"""
Returns a list of forms that have been marked for deletion. Raises an
AttributeError if deletion is not allowed.
"""
if not self.is_valid() or not self.can_delete:
raise AttributeError("'%s' object has no attribute 'deleted_forms'" % self.__class__.__name__)
# construct _deleted_form_indexes which is just a list of form indexes
# that have had their deletion widget set to True
if not hasattr(self, '_deleted_form_indexes'):
self._deleted_form_indexes = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
if self._should_delete_form(form):
self._deleted_form_indexes.append(i)
return [self.forms[i] for i in self._deleted_form_indexes]
deleted_forms = property(_get_deleted_forms)
def _get_ordered_forms(self):
"""
Returns a list of form in the order specified by the incoming data.
Raises an AttributeError if ordering is not allowed.
"""
if not self.is_valid() or not self.can_order:
raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
# Construct _ordering, which is a list of (form_index, order_field_value)
# tuples. After constructing this list, we'll sort it by order_field_value
# so we have a way to get to the form indexes in the order specified
# by the form data.
if not hasattr(self, '_ordering'):
self._ordering = []
for i in range(0, self.total_form_count()):
form = self.forms[i]
# if this is an extra form and hasn't changed, don't consider it
if i >= self.initial_form_count() and not form.has_changed():
continue
# don't add data marked for deletion to self.ordered_data
if self.can_delete and self._should_delete_form(form):
continue
self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
# After we're done populating self._ordering, sort it.
# A sort function to order things numerically ascending, but
# None should be sorted below anything else. Allowing None as
# a comparison value makes it so we can leave ordering fields
# blank.
def compare_ordering_key(k):
if k[1] is None:
return (1, 0) # +infinity, larger than any number
return (0, k[1])
self._ordering.sort(key=compare_ordering_key)
        # Return a list of form.cleaned_data dicts in the order specified by
# the form data.
return [self.forms[i[0]] for i in self._ordering]
ordered_forms = property(_get_ordered_forms)
#@classmethod
def get_default_prefix(cls):
return 'form'
get_default_prefix = classmethod(get_default_prefix)
def non_form_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
form -- i.e., from formset.clean(). Returns an empty ErrorList if there
are none.
"""
if self._non_form_errors is not None:
return self._non_form_errors
return self.error_class()
def _get_errors(self):
"""
Returns a list of form.errors for every form in self.forms.
"""
if self._errors is None:
self.full_clean()
return self._errors
errors = property(_get_errors)
def _should_delete_form(self, form):
# The way we lookup the value of the deletion field here takes
# more code than we'd like, but the form's cleaned_data will
# not exist if the form is invalid.
field = form.fields[DELETION_FIELD_NAME]
raw_value = form._raw_value(DELETION_FIELD_NAME)
should_delete = field.clean(raw_value)
return should_delete
def is_valid(self):
"""
Returns True if form.errors is empty for every form in self.forms.
"""
if not self.is_bound:
return False
# We loop over every form.errors here rather than short circuiting on the
# first failure to make sure validation gets triggered for every form.
forms_valid = True
for i in range(0, self.total_form_count()):
form = self.forms[i]
if self.can_delete:
if self._should_delete_form(form):
# This form is going to be deleted so any of its errors
# should not cause the entire formset to be invalid.
continue
if bool(self.errors[i]):
forms_valid = False
return forms_valid and not bool(self.non_form_errors())
def full_clean(self):
"""
Cleans all of self.data and populates self._errors.
"""
self._errors = []
if not self.is_bound: # Stop further processing.
return
for i in range(0, self.total_form_count()):
form = self.forms[i]
self._errors.append(form.errors)
# Give self.clean() a chance to do cross-form validation.
try:
self.clean()
except ValidationError, e:
self._non_form_errors = self.error_class(e.messages)
def clean(self):
"""
Hook for doing any extra formset-wide cleaning after Form.clean() has
been called on every form. Any ValidationError raised by this method
will not be associated with a particular form; it will be accesible
via formset.non_form_errors()
"""
pass
def add_fields(self, form, index):
"""A hook for adding extra fields on to each form instance."""
if self.can_order:
# Only pre-fill the ordering field for initial forms.
if index is not None and index < self.initial_form_count():
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_(u'Order'), initial=index+1, required=False)
else:
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_(u'Order'), required=False)
if self.can_delete:
form.fields[DELETION_FIELD_NAME] = BooleanField(label=_(u'Delete'), required=False)
def add_prefix(self, index):
return '%s-%s' % (self.prefix, index)
def is_multipart(self):
"""
        Returns True if the formset needs to be multipart-encoded, i.e. it
has FileInput. Otherwise, False.
"""
return self.forms and self.forms[0].is_multipart()
def _get_media(self):
# All the forms on a FormSet are the same, so you only need to
# interrogate the first form for media.
if self.forms:
return self.forms[0].media
else:
return Media()
media = property(_get_media)
def as_table(self):
"Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
# XXX: there is no semantic division between forms here, there
# probably should be. It might make sense to render each form as a
# table row with each field as a td.
forms = u' '.join([form.as_table() for form in self.forms])
return mark_safe(u'\n'.join([unicode(self.management_form), forms]))
def as_p(self):
"Returns this formset rendered as HTML <p>s."
forms = u' '.join([form.as_p() for form in self.forms])
return mark_safe(u'\n'.join([unicode(self.management_form), forms]))
def as_ul(self):
"Returns this formset rendered as HTML <li>s."
forms = u' '.join([form.as_ul() for form in self.forms])
return mark_safe(u'\n'.join([unicode(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
can_delete=False, max_num=None):
"""Return a FormSet for the given form class."""
attrs = {'form': form, 'extra': extra,
'can_order': can_order, 'can_delete': can_delete,
'max_num': max_num}
return type(form.__name__ + 'FormSet', (formset,), attrs)
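# Illustrative sketch (not part of Django's source): typical use of
# formset_factory; 'ArticleForm' is a hypothetical Form subclass.
#
#   ArticleFormSet = formset_factory(ArticleForm, extra=2, can_delete=True)
#   formset = ArticleFormSet(request.POST or None, prefix='articles')
#   if formset.is_valid():
#       for cleaned in formset.cleaned_data:
#           ...  # one dict per form; forms marked for deletion carry a true 'DELETE' key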
def all_valid(formsets):
"""Returns true if every formset in formsets is valid."""
valid = True
for formset in formsets:
if not formset.is_valid():
valid = False
return valid
| MediaSapiens/autonormix | django/forms/formsets.py | Python | bsd-3-clause | 14,683 |
# Copyright (C) 2016 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ooi.wsgi import OCCIMiddleware
from ooi.api.helpers import OpenStackHelper
from soi.config import KEYSTONE_URL, HTTP_HOST, HOST_SCHEME
from soi.synnefo import AstakosClient, AUTH_URL
from soi import utils, compute, storage, storage_link, network, network_links
from kamaki.clients import ClientError
utils.patch_class_methods(OpenStackHelper, compute.function_map)
utils.patch_class_methods(OpenStackHelper, storage.function_map)
utils.patch_class_methods(OpenStackHelper, storage_link.function_map)
utils.patch_class_methods(OpenStackHelper, network.function_map)
utils.patch_class_methods(OpenStackHelper, network_links.function_map)
REDIRECT_MSG = '401 - redirect to: {URL}'
class SNFOCCIMiddleware(OCCIMiddleware):
"""Synnefo wrapper for OCCIMiddleware"""
def __call__(self, environ, response, *args, **kwargs):
"""Check request for essential AUTH-related headers, early"""
if 'HTTP_X_AUTH_TOKEN' not in environ:
print "No token provided, redirect to Astavoms"
status = '401 Not Authorized'
headers = [
('Content-Type', 'text/html'),
(
'Www-Authenticate',
'Keystone uri=\'{0}\''.format(KEYSTONE_URL)
)
]
response(status, headers)
msg = REDIRECT_MSG.format(URL=KEYSTONE_URL)
print msg
return [msg]
print 'Token provided'
snf_token = environ['HTTP_X_AUTH_TOKEN']
try:
snf_project = environ.get('HTTP_X_SNF_PROJECT') or environ[
'HTTP_X_PROJECT_ID']
print 'Project ID provided'
except KeyError:
print "No project header, ask Astakos for project ID"
snf_auth = AstakosClient(AUTH_URL, snf_token)
try:
user_info = snf_auth.authenticate()
except ClientError as ce:
print ce.status, ce, ce.details
status = '{0} {1}'.format(ce.status, ce)
headers = [
('Content-Type', 'application/json'),
                    ('Content-Length', str(len(ce.details)))
]
response(status, headers)
return [ce.details, ]
projects = user_info['access']['user']['projects']
user_uuid = user_info['access']['user']['id']
snf_project = user_uuid
for project in projects:
if project != user_uuid:
snf_project = project
print "Found a project - hope it suffices"
break
if snf_project == user_uuid:
print 'Fall back to user UUID as project ID'
environ['HTTP_X_PROJECT_ID'] = snf_project
environ['HTTP_HOST'] = HTTP_HOST
environ['wsgi.url_scheme'] = HOST_SCHEME
return super(SNFOCCIMiddleware, self).__call__(
environ, response, *args, **kwargs)
| grnet/snf-occi | soi/wsgi.py | Python | gpl-3.0 | 3,651 |
# -*- coding: utf-8 -*-
import re
numeric_re = re.compile(r'^\d+$')
def is_int(s):
    return numeric_re.match(s) is not None
def string_to_list(strs, spliter=','):
"""Convert the string to list"""
if isinstance(strs, list):
str_list = (unicode(item).strip() for item in strs)
    elif spliter in strs:
str_list = (unicode(item).strip() for item in strs.split(spliter))
else:
str_list = (strs,)
return [s for s in str_list if s]
def form_errors_to_list(form):
"""
Convert errors of form to list
Use for Ajax.Request response
"""
return [(k, unicode(v[0])) for k, v in form.errors.items()]
def get_string_combinations(s):
"""
@param s: string
@return: a list containing s and the lowercase, uppercase
& first letter uppercase form of s.
"""
return s, s.lower(), s.upper(), s.capitalize()
def calc_percent(x, y):
if not x or not y:
return 0
return float(x) / y * 100
def request_host_link(request, domain_name=None):
if request.is_secure():
protocol = 'https://'
else:
protocol = 'http://'
if not domain_name:
domain_name = request.get_host()
return protocol + domain_name
def clean_request(request, keys=None):
"""
Clean the request strings
"""
request_contents = request.GET.copy()
if not keys:
keys = request_contents.keys()
rt = {}
for k in keys:
k = str(k)
if request_contents.get(k):
if k == 'order_by' or k == 'from_plan':
continue
v = request.GET[k]
# Convert the value to be list if it's __in filter.
if k.endswith('__in') and isinstance(v, unicode):
v = string_to_list(v)
rt[k] = v
return rt
class QuerySetIterationProxy(object):
'''Iterate a series of object and its associated objects at once
This iteration proxy applies to this kind of structure especially.
Group Properties Logs
-------------------------------------------------
group 1 property 1 log at Mon.
property 2 log at Tue.
property 3 log at Wed.
-------------------------------------------------
group 2 property 4 log at Mon.
property 5 log at Tue.
property 6 log at Wed.
-------------------------------------------------
group 3 property 7 log at Mon.
property 8 log at Tue.
property 9 log at Wed.
where, in each row of the table, one or more than one properties and logs
to be shown along with the group.
'''
def __init__(self, iterable, associate_name=None, **associated_data):
'''Initialize proxy
Arguments:
- iterable: an iterable object representing the main set of objects.
- associate_name: the attribute name of each object within iterable,
          from which a value is retrieved to look up the associated data in
          associated_data. Default is 'pk'.
        - associated_data: the associated data, which contains all data for each
          item in the set referenced by iterable. You can pass multiple
          associated data sets as Python **kwargs. The associated data
          must be grouped by the value of associate_name.
'''
self._iterable = iter(iterable)
self._associate_name = associate_name
if self._associate_name is None:
self._associate_name = 'pk'
self._associated_data = associated_data
def __iter__(self):
return self
def next(self):
next_one = self._iterable.next()
for name, lookup_table in self._associated_data.iteritems():
setattr(next_one,
name,
lookup_table.get(
getattr(next_one, self._associate_name, None),
()))
return next_one
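# Illustrative sketch (not part of the original module): pairing an iterable of
# groups with pre-grouped associated data; 'groups', 'properties_by_pk' and
# 'logs_by_pk' are hypothetical mappings keyed by the group pk.
#
#   proxy = QuerySetIterationProxy(groups,
#                                  properties=properties_by_pk,
#                                  logs=logs_by_pk)
#   for group in proxy:
#       group.properties   # associated rows for this group, or () when absent
#       group.logs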
| MrSenko/Nitrate | tcms/core/utils/__init__.py | Python | gpl-2.0 | 4,047 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
import math
from op_test import OpTest
import copy
def box_clip(input_box, im_info, output_box):
im_w = round(im_info[1] / im_info[2])
im_h = round(im_info[0] / im_info[2])
output_box[:, :, 0] = np.maximum(
np.minimum(input_box[:, :, 0], im_w - 1), 0)
output_box[:, :, 1] = np.maximum(
np.minimum(input_box[:, :, 1], im_h - 1), 0)
output_box[:, :, 2] = np.maximum(
np.minimum(input_box[:, :, 2], im_w - 1), 0)
output_box[:, :, 3] = np.maximum(
np.minimum(input_box[:, :, 3], im_h - 1), 0)
def batch_box_clip(input_boxes, im_info, lod):
n = input_boxes.shape[0]
m = input_boxes.shape[1]
output_boxes = np.zeros((n, m, 4), dtype=np.float32)
cur_offset = 0
for i in range(len(lod)):
box_clip(input_boxes[cur_offset:(cur_offset + lod[i]), :, :],
im_info[i, :],
output_boxes[cur_offset:(cur_offset + lod[i]), :, :])
cur_offset += lod[i]
return output_boxes
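# Illustrative note (not part of the original test): with lod == [1, 2, 3] the six
# box rows are split into three images (1 + 2 + 3 rows) and each slice is clipped
# against its own im_info entry, e.g.
#
#   boxes = np.random.random((6, 10, 4)).astype('float32') * 5
#   info = np.array([[5, 8, 1.], [6, 6, 1.], [7, 5, 1.]]).astype('float32')
#   clipped = batch_box_clip(boxes, info, [1, 2, 3])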
class TestBoxClipOp(OpTest):
def test_check_output(self):
self.check_output()
def setUp(self):
self.op_type = "box_clip"
lod = [[1, 2, 3]]
input_boxes = np.random.random((6, 10, 4)) * 5
im_info = np.array([[5, 8, 1.], [6, 6, 1.], [7, 5, 1.]])
output_boxes = batch_box_clip(input_boxes, im_info, lod[0])
self.inputs = {
'Input': (input_boxes.astype('float32'), lod),
'ImInfo': im_info.astype('float32'),
}
self.outputs = {'Output': output_boxes}
if __name__ == '__main__':
unittest.main()
| PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_box_clip_op.py | Python | apache-2.0 | 2,286 |
#!/usr/bin/env python
"""
counts indicies (aka barcodes) from illumina fastq file
"""
import sys
import os
import time
import argparse
import glob
import collections
import ConfigParser
import shlex
import pipes
import subprocess
import tempfile
import re
import errno
import fileinput
import gzip
### Constants #################################################################
################################################################################
def num2str(val, none_val='0'):
return none_val if val is None else str(val)
class ConfigFakeSecHead(object):
def __init__(self, fp, section='DEFAULTS'):
self.fp = fp
self.sechead = '['+str(section)+']\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else: return self.fp.readline()
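# Illustrative sketch (not part of the original script): ConfigFakeSecHead lets
# ConfigParser read a section-less key=value file by injecting a fake
# '[DEFAULTS]' header; 'opts.cfg' is a hypothetical file name.
#
#   cfg = ConfigParser.SafeConfigParser()
#   cfg.readfp(ConfigFakeSecHead(open('opts.cfg')))
#   defaults = dict(cfg.items('DEFAULTS'))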
class CustomArgparseHelpFormatter(argparse.HelpFormatter):
"""Help message formatter for argparse
combining RawTextHelpFormatter and ArgumentDefaultsHelpFormatter
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
def _split_lines(self, text, width):
return text.splitlines()
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
################################################################################
### Main ######################################################################
def exit(retcode=0):
# cleanup and exit
global g_start_tic
print >>sys.stderr, "END", "%0.2f"%(time.time()-g_start_tic),"secs elapsed"
sys.exit(retcode)
def Main(argv=None):
print >>sys.stderr, "START"
global g_start_tic
g_start_tic = time.time()
# parse cfg_file argument
conf_parser = argparse.ArgumentParser(description=__doc__,
formatter_class=CustomArgparseHelpFormatter,
add_help=False) # turn off help so later parse (with all opts) handles it
conf_parser.add_argument('-c', '--cfg-file', type=argparse.FileType('r'), help="Config file specifiying options/parameters.\nAny long option can be set by remove the leading '--' and replace '-' with '_'")
args, remaining_argv = conf_parser.parse_known_args(argv)
# build the config (read config files)
if args.cfg_file:
cfg = ConfigParser.SafeConfigParser()
cfg.readfp(ConfigFakeSecHead(args.cfg_file))
defaults = dict(cfg.items("DEFAULTS"))
# special handling of paratmeters that need it like lists
if( 'fastq_files' in defaults ): # fastq_files needs to be a list
defaults['fastq_files'] = [ x for x in defaults['fastq_files'].split('\n') if x and x.strip() and not x.strip()[0] in ['#',';'] ]
else:
defaults = {}
# @TCC TEMP WHILE DEVELOPING
#defaults['fastq_files'] = ['/data/archive/2014-07-21/140715_HS3B/Undetermined_indices/Sample_lane1/lane1_Undetermined_L001_R1_001.fastq.gz']
#defaults['fastq_files'] = ['/data/archive/2014-07-21/140715_HS3B/Undetermined_indices/Sample_lane2/lane2_Undetermined_L002_R1_001.fastq.gz']
# Parse rest of arguments with a new ArgumentParser
aparser = argparse.ArgumentParser(description=__doc__, parents=[conf_parser], formatter_class=CustomArgparseHelpFormatter)
aparser.add_argument('fastq_files', metavar='fastq-file', nargs='*')
aparser.add_argument('-n', '--num-reads', default=None, help="Number of reads to look at, default=all")
aparser.add_argument('-N', '--num-barcodes', default=10, type=int, help="Number of (most frequent) barcode sequences to report on; 0 for all ")
aparser.add_argument('-v', '--verbose', action='count', help="Increase verbosity level")
aparser.set_defaults(**defaults)
# process options/arguments
args = aparser.parse_args(remaining_argv)
# custom/more complex argument parsing errors
# need fastq_files list
if( not args.fastq_files ):
aparser.error("Must provide fastq files as arguments and/or in the CFG_FILE (fastq_files parameter)")
## expand potential globs (wildcards) in the bam_files list
# also checks that bam files exist
fastq_files = []
for f in args.fastq_files:
tmp = glob.glob(f)
if( not tmp ):
raise IOError('Cannot find fastq file(s) matching "'+str(f)+'"')
fastq_files.extend(tmp)
counts = {}
for filename in fastq_files:
print >>sys.stderr, "#"*180
print >>sys.stderr, "Reading:", f
if args.num_reads:
tmp = int(float(args.num_reads))
with gzip.open(filename) as f:
in_block_count = 0 # counter used to divide up blocks
for l in f:
if( in_block_count == 0 ): # header line is first line in block
#sys.stdout.write(l)
idx = l.split(':')[-1][:-1]
if( idx in counts ):
counts[idx] += 1
else:
counts[idx] = 1
in_block_count += 1
if( in_block_count > 3 ): # blocks are 4 lines long
in_block_count = 0
# only look at part of file?
if args.num_reads:
tmp -= 1
if( tmp < 0 ):
break
sorted_list = [(k,v) for v,k in sorted( [(v,k) for k,v in counts.items()],reverse=True) ]
if args.num_barcodes > 0:
to_report = sorted_list[0:args.num_barcodes]
else:
to_report = sorted_list
for v in to_report:
print v
# Cleanup and end normally
exit()
## functions #############################################################################
#########################################################################
# Main loop hook... if run as script run main, else this is just a module
if __name__ == "__main__":
sys.exit(Main(argv=None))
| travc/ngs_scripts | count_illumina_indicies.py | Python | mit | 6,350 |
import asp.jit.asp_module as asp_module
import numpy as np
from em import *
import pickle
import sys
param_type_map = {
'num_blocks_estep': ('cardinal','variant'),
'num_threads_estep': ('cardinal','variant'),
'num_threads_mstep': ('cardinal','variant'),
'num_event_blocks': ('cardinal','variant'),
'max_num_dimensions': ('cardinal','variant'),
'max_num_components': ('cardinal','variant'),
'max_num_dimensions_covar_v3': ('cardinal','variant'),
'max_num_components_covar_v3': ('cardinal','variant'),
'diag_only': ('binary','variant'),
'max_iters': ('cardinal','variant'),
'min_iters': ('cardinal','variant'),
'covar_version_name': ('nominal','variant'),
'supports_32b_floating_point_atomics': ('nominal','machine'),
'max_xy_grid_dim': ('cardinal','machine'),
'max_threads_per_block': ('cardinal','machine'),
'max_shared_memory_capacity_per_SM': ('cardinal','machine')
}
if __name__ == '__main__':
ifile_name = sys.argv[1]
ofile_name = sys.argv[2]
func_name = sys.argv[3]
device_id = sys.argv[4]
gmm = GMM(1,1)
mod = gmm.get_asp_mod()
mod.restore_method_timings(func_name,ifile_name)
var_names = mod.compiled_methods[func_name].v_id_list
param_names = mod.compiled_methods[func_name].param_names
var_times = mod.compiled_methods[func_name].database.variant_times
f = file(ofile_name, 'a')
f.write("Heading, Function Name, Device Name, Input Params,,,Variant Params"+","*len(param_names)+"Time\n")
f.write("Name,function,device,M,D,N,%s,Time\n" % ','.join(param_names))
f.write("Type,nominal,nominal,cardinal,cardinal,cardinal,%s,real\n" %
','.join([param_type_map.get(n,'unknown')[0] for n in param_names]))
f.write("Prefix,problem,machine,problem,problem,problem,%s,performance\n" %
','.join([param_type_map.get(n,'unknown')[1] for n in param_names]))
for size, times in var_times.items():
for name in var_names:
time = times[name]
f.write(",%s,%s,%s,%s,%s\n" % ( func_name,
device_id,
','.join([str(p) for p in size[1:]]),
','.join(name.split('_')[1:]),
time ) )
f.close()
| hcook/gmm | tests/em_convert_from_pickle_dump_to_csv.py | Python | bsd-3-clause | 2,516 |
#! /usr/bin/env python
#------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# tailswitch
#------------------------------------------------------------------------------
from config.serverConfig import ServerConfig
from controller.solr.solrServer import SolrServer
from controller.solr.search.locationLookup import LocationLookup
import time
from pprint import pprint
server = ServerConfig()
solr = SolrServer()
#------------------------------------------------------------------------------
# exposed methods
#------------------------------------------------------------------------------
def startSolrServer():
"""
Starts the solr server in a new thread.
server url http://127.0.0.1:8983/solr
"""
solr.start()
time.sleep(4)
def locationSearch():
"""
Search the solr locationData database for a given location.
format as zipcode, city, or zipcode & city
"""
startSolrServer()
test = LocationLookup("localhost", 8983, "locationData")
# example code
test.search("mount washington")
results1 = test.results.documents
test.search("89506 ren")
results2 = test.results.documents
test.search("monica")
results3 = test.results.documents
test.search("90016")
results4 = test.results.documents
test.search("dieg")
results5 = test.results.documents
print('\n-------------------\n')
pprint(results1)
print("\n---------------------\n")
pprint(results2)
print("\n---------------------\n")
pprint(results3)
print("\n---------------------\n")
pprint(results4)
print("\n---------------------\n")
pprint(results5)
locationSearch()
| knittledan/Location_Search_Prediction | server/core/solrSearch.py | Python | mit | 1,845 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for tensorflow/keras."""
import typing
import tensorflow.compat.v2 as tf
def create_mlp(
input_dim,
output_dim,
hidden_dims = (256, 256),
activation = tf.nn.relu,
near_zero_last_layer = True,
normalize_last_layer = False,
):
"""Creates an MLP.
Args:
    input_dim: input dimensionality.
    output_dim: output dimensionality.
    hidden_dims: hidden layers dimensionality.
    activation: activations after hidden units.
    near_zero_last_layer: whether to initialize the last layer weights near zero.
    normalize_last_layer: whether to apply layer normalization after the last layer.
Returns:
An MLP model.
"""
initialization = tf.keras.initializers.VarianceScaling(
scale=0.333, mode='fan_in', distribution='uniform')
near_zero_initialization = tf.keras.initializers.VarianceScaling(
scale=1e-2, mode='fan_in', distribution='uniform')
last_layer_initialization = (
near_zero_initialization if near_zero_last_layer else initialization)
layers = []
for hidden_dim in hidden_dims:
layers.append(
tf.keras.layers.Dense(
hidden_dim,
activation=activation,
kernel_initializer=initialization))
layers += [
tf.keras.layers.Dense(
output_dim, kernel_initializer=last_layer_initialization)
]
if normalize_last_layer:
layers += [tf.keras.layers.LayerNormalization(epsilon=1e-6)]
if isinstance(input_dim, int):
input_shape = (input_dim,)
else:
input_shape = input_dim
  inputs = tf.keras.Input(shape=input_shape)
outputs = tf.keras.Sequential(layers)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.call = tf.function(model.call)
return model
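# Illustrative sketch (not part of the original module): building a small network
# with create_mlp; the dimensions below are hypothetical.
#
#   net = create_mlp(input_dim=17, output_dim=6, hidden_dims=(256, 256))
#   out = net(tf.zeros([32, 17]))   # -> tensor of shape [32, 6]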
def my_reset_states(metric):
"""Resets metric states.
Args:
metric: A keras metric to reset states for.
"""
for var in metric.variables:
var.assign(0)
def orthogonal_regularization(model, reg_coef=1e-4):
"""Orthogonal regularization v2.
See equation (3) in https://arxiv.org/abs/1809.11096.
Args:
    model: A keras model to apply regularization for.
reg_coef: Orthogonal regularization coefficient.
Returns:
A regularization loss term.
"""
reg = 0
for layer in model.layers:
if isinstance(layer, tf.keras.layers.Dense):
prod = tf.matmul(tf.transpose(layer.kernel), layer.kernel)
reg += tf.reduce_sum(tf.math.square(prod * (1 - tf.eye(prod.shape[0]))))
return reg * reg_coef
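# Illustrative sketch (not part of the original module): the regularizer is meant
# to be added to a training loss; 'actor_network' and 'loss' are hypothetical.
#
#   total_loss = loss + orthogonal_regularization(actor_network, reg_coef=1e-4)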
| google-research/google-research | rl_repr/batch_rl/keras_utils.py | Python | apache-2.0 | 2,910 |
import falcon
# from main.settings import DB as db
# from main.helpers import QueryParser
import json
import urlparse
from werkzeug.http import parse_options_header
from werkzeug.formparser import parse_form_data
from cStringIO import StringIO
from werkzeug.wsgi import LimitedStream
from werkzeug import secure_filename
class CreateTemplateExclusiveImage:
"""End point for creating dealtype"""
def on_get(self, req, resp, stream, form={}, files={}):
"""return status 405. asks to use post api.
"""
resp.content_type = "application/json"
resp_dict = {"status": "error",
"summary": "use post request for logout"}
resp.body = (json.dumps(resp_dict))
def on_post(self, req, resp, stream, form={}, files={}):
"""
"""
file = files.get('file', [''])[0]
if file:
filename = secure_filename(file.filename)
file.save(filename)
resp.status = falcon.HTTP_200
resp.content_type = "application/json"
resp_dict = {"status": "success",
"summary": "File uploaded"}
resp.body = (json.dumps(resp_dict))
def generate_formdata(req, resp, params):
"""sets params['form'], params['files'], params['stream']
to pass to every endpoint.
"""
if req.method != 'GET':
mimetype, options = parse_options_header(req.get_header('content-type'))
data = req.stream.read()
environ = {'wsgi.input': StringIO(data),
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': '%s; boundary=%s' %
(mimetype, options['boundary']),
'REQUEST_METHOD': 'POST'}
stream, form, files = parse_form_data(environ)
params['stream'], params['form'], params['files'] = stream, dict(form),\
dict(files)
return True
else:
di = urlparse.parse_qsl(req.query_string)
params['form'] = dict(di)
params['stream'] = LimitedStream()
params['files'] = dict()
return True
# hooks to be executed on every request before reaching to the endpoint
app = falcon.API(before=[generate_formdata])
# importing all the endpoints
cr = CreateTemplateExclusiveImage()
app.add_route('/upload', cr)
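# Illustrative sketch (not part of the original module): the /upload endpoint
# expects a multipart/form-data POST, e.g. with the (assumed available) requests
# library against a hypothetical local server:
#
#   import requests
#   requests.post('http://localhost:8000/upload',
#                 files={'file': open('photo.jpg', 'rb')})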
| vinaykrdahiya/justuploads | main/__init__.py | Python | unlicense | 2,292 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import sys
import time
import os
sys.path.append(os.getcwd()+'/../../../')
sys.path.append(os.getcwd()+"/../")
from model.NumNer import NumRec
from model.CBGM import CBGM
from model.PlaceNer import PlaceRec
from model.NameNer import CNNAME,decode
from config.config import Config
TRAIN_FILE = '../train/data/pku_data.txt'
TRAIN_FILE2 = '../train/data/msr_data.txt'
USER_DICT = '../train/data/userdict.txt'
TEST_FILE = Config.PKU_TEST
TEST_FILE2 = Config.MSR_TEST
TEST_OUTPUT = '../score/output.txt'
class Seg(object):
def __init__(self):
self.segger = CBGM()
self.Pner = PlaceRec()
self.Numner = NumRec()
self.cname = CNNAME()
self.cname.fit()
self.idict = {}
self.load_dict()
def load_dict(self):
f = codecs.open(USER_DICT,'r')
for line in f:
if sys.version < '3.0':
if not (type(line) is unicode):
try:
line = line.decode('utf-8')
except:
line = line.decode('gbk', 'ignore')
word = line.strip()
self.idict[word]=1
def train(self, fname):
fr = codecs.open(fname, 'r', 'utf-8')
data = []
for i in fr:
line = i.strip()
if not line:
continue
tmp = map(lambda x: x.split('/'), line.split())
data.append(tmp)
fr.close()
self.segger.train(data)
def seg(self, sentence):
        '''Convert BMES tags back to a word sequence'''
ret = self.segger.tag(sentence)
res = []
tmp = ''
for i in ret:
if i[1] == 'e':
res.append(tmp+i[0])
tmp = ''
elif i[1] == 'b' or i[1] == 's':
if tmp:
res.append(tmp)
tmp = i[0]
else:
tmp += i[0]
if tmp:
res.append(tmp)
return res
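# Illustrative note (not from the original source): the tagger in Seg.seg() labels
# each character with b/m/e/s (begin/middle/end/single word) and the method only
# joins characters back into words, e.g.
#   [(u'我', 's'), (u'喜', 'b'), (u'欢', 'e')]  ->  [u'我', u'喜欢']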
def Name_Replace(namelist,sen):
for name in namelist.strip().split(' '):
index = 0
if len(name)>=3:
while index<len(sen):
if index < len(sen)-2 and sen[index]== name[0] and sen[index+1] == name[1] and sen[index+2] == name[2]:
tmp = name[1]+name[2]
sen = sen[:index+1] + [tmp] + sen[index+3:]
index+=1
index+=1
return sen
if __name__ == '__main__':
print(time.strftime('%Y-%m-%d %H:%M:%S'))
seg = Seg()
seg.train(TRAIN_FILE2)
f = open(TEST_FILE2)
f2 = open(TEST_OUTPUT,'wb')
print('model loaded')
print(time.strftime('%Y-%m-%d %H:%M:%S'))
for line in f:
if sys.version < '3.0':
if not (type(line) is unicode):
try:
line = line.decode('utf-8')
except:
line = line.decode('gbk', 'ignore')
line = line.strip()
res = seg.seg(line)
res1 = seg.Numner.NumNer(res)
# res2 = seg.Pner.Place_Ner(res1)
# namelist = decode(seg.cname,res2)
# res3 = Name_Replace(namelist,res2)
ans = ' '.join(res1)
ans+='\n'
f2.write(ans.encode('utf-8'))
print(time.strftime('%Y-%m-%d %H:%M:%S')) | muyeby/NLP | Seg/CBGseg/test/seg.py | Python | mit | 3,396 |
# -*- coding: utf-8 -*-
'''
Created on 15 Feb 2013
@author: tedlaz
'''
from PyQt4 import QtCore, QtGui,Qt
from collections import OrderedDict
import utils_db as dbutils
import widgets
from utils_qt import fFindFromList
import osyk
#import classwizard_rc
sqlInsertFpr = u'''
INSERT INTO m12_fpr (epon,onom,patr,mitr,sex_id,igen,afm,amka,aika,pol,odo,num,tk) VALUES ('{0}','{1}','{2}','{3}','{4}','{5}','{6}','{7}','{8}','{9}','{10}','{11}','{12}')
'''
sqlInsertPro = u'''
INSERT INTO m12_pro (prod,fpr_id,coy_id,eid_id,proy,aptyp_id,apod) VALUES ('{0}','{1}','{2}','{3}','{4}','{5}','{6}')
'''
class NewEmpWizard(QtGui.QWizard):
def __init__(self, parent=None):
super(NewEmpWizard, self).__init__(parent)
self.setAttribute(Qt.Qt.WA_DeleteOnClose)
if parent:
self.db = parent.parent.db
else:
self.db = None
self.addPage(IntroPage(self))
self.addPage(coDataPage(self))
self.addPage(eidPage(self))
self.addPage(finalPage(self))
self.setWizardStyle(QtGui.QWizard.ModernStyle)
self.setOption(QtGui.QWizard.IndependentPages,True)
#self.setPixmap(QtGui.QWizard.BannerPixmap,QtGui.QPixmap(':/banner'))
#self.setPixmap(QtGui.QWizard.BackgroundPixmap, QtGui.QPixmap(':/background'))
self.setWindowTitle(u"Οδηγός Πρόσληψης Εργαζομένου")
def accept(self):
sqlfpr = sqlInsertFpr.format(self.field('epon'),self.field('onom'),self.field('patr'),self.field('mitr'),
self.field('sex_id'),self.field('igen'),self.field('afm'),self.field('amka'),
self.field('aika'),self.field('pol'),self.field('odo'),self.field('num'),self.field('tk'))
fpr_id = dbutils.commitToDb(sqlfpr, self.db)
sqlpro = sqlInsertPro.format(self.field('prod'),fpr_id,self.field('coy_id'),self.field('eid_id'),
self.field('proy'),self.field('aptyp_id'),self.field('apod'))
pr_id = dbutils.commitToDb(sqlpro, self.db)
print u'Η εγγραφή αποθηκεύτηκε με κωδικούς {0}, {1}'.format(fpr_id,pr_id)
super(NewEmpWizard, self).accept()
class IntroPage(QtGui.QWizardPage):
def __init__(self, parent=None):
super(IntroPage, self).__init__(parent)
self.setButtonText(QtGui.QWizard.BackButton,u'< Πίσω')
self.setButtonText(QtGui.QWizard.NextButton,u'Επόμενο >')
self.setButtonText(QtGui.QWizard.CancelButton,u'Ακύρωση')
self.setTitle(u"Οδηγίες")
#self.setPixmap(QtGui.QWizard.WatermarkPixmap, QtGui.QPixmap(':/watermark1'))
label = QtGui.QLabel(u"Αυτός ο οδηγός θα δημιουργήσει Νέα Πρόσληψη Εργαζομένου.\n\n "
u"Για να προχωρήσετε θα πρέπει να εισάγετε τα απαραίτητα δεδομένα \n\n "
u"Πατήστε δημιουργία στην τελευταία οθόνη για να ολοκληρώσετε.")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
class coDataPage(QtGui.QWizardPage):
def __init__(self, parent=None):
super(coDataPage, self).__init__(parent)
self.setButtonText(QtGui.QWizard.BackButton,u'< Πίσω')
self.setButtonText(QtGui.QWizard.NextButton,u'Επόμενο >')
self.setButtonText(QtGui.QWizard.CancelButton,u'Ακύρωση')
self.setTitle(u"Εισαγωγή σοιχείων Εργαζομένου")
self.setSubTitle(u"Συμπληρώστε τα στοιχεία του εργαζομένου")
#self.setPixmap(QtGui.QWizard.LogoPixmap, QtGui.QPixmap(':/logo1'))
self.labels = OrderedDict()
self.fields = OrderedDict()
self.labels['epon']= QtGui.QLabel(u"Επώνυμο:")
self.fields['epon'] = widgets.DbLineEdit()
self.labels['onom']= QtGui.QLabel(u"Όνομα:")
self.fields['onom'] = widgets.DbLineEdit()
self.labels['patr']= QtGui.QLabel(u"Πατρώνυμο:")
self.fields['patr'] = widgets.DbLineEdit()
self.labels['mitr']= QtGui.QLabel(u"Μητρώνυμο:")
self.fields['mitr'] = widgets.DbLineEdit()
self.labels['sex_id']= QtGui.QLabel(u"Φύλο:")
self.fields['sex_id'] = widgets.DbComboBox([[0,u'Άνδρας'],[1,u'Γυναίκα']])
self.labels['igen']= QtGui.QLabel(u"Ημ.Γέννησης:")
self.fields['igen'] = widgets.DbDateEdit()
self.labels['afm']= QtGui.QLabel(u"ΑΦΜ:")
self.fields['afm'] = widgets.DbLineEdit()
self.labels['doy'] = QtGui.QLabel(u"ΔΟΥ:")
self.fields['doy'] = widgets.DbLineEdit()
self.fields['doy'].setReadOnly(True)
doyFindButton = QtGui.QPushButton(u'...')
doyFindButton.setMaximumSize(QtCore.QSize(20, 50))
doyLayout = QtGui.QHBoxLayout()
doyLayout.addWidget(self.fields['doy'])
doyLayout.addWidget(doyFindButton)
def openFindDlg():
head = [u'Κωδ',u'ΔΟΥ']
cw = [35,300]
form = fFindFromList(osyk.doy_list(),head,cw)
if form.exec_() == QtGui.QDialog.Accepted:
self.fields['doy'].setText(form.array[1])
doyFindButton.clicked.connect(openFindDlg)
self.labels['amka']= QtGui.QLabel(u"ΑΜΚΑ:")
self.fields['amka'] = widgets.DbLineEdit()
self.labels['aika']= QtGui.QLabel(u"Αμ.ΙΚΑ:")
self.fields['aika'] = widgets.DbLineEdit()
self.labels['pol']= QtGui.QLabel(u"Πόλη:")
self.fields['pol'] = widgets.DbLineEdit()
self.labels['tk']= QtGui.QLabel(u"Ταχ.Κωδικός:")
self.fields['tk'] = widgets.DbLineEdit()
self.labels['odo']= QtGui.QLabel(u"Οδός:")
self.fields['odo'] = widgets.DbLineEdit()
self.labels['num']= QtGui.QLabel(u"Αριθμός:")
self.fields['num'] = widgets.DbLineEdit()
layout = QtGui.QGridLayout()
i = j = 0
for k in self.labels:
self.labels[k].setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
if k == 'doy':
layout.addWidget(self.labels[k],i,j+0)
layout.addLayout(doyLayout,i,j+1)
else:
layout.addWidget(self.labels[k],i,j+0)
layout.addWidget(self.fields[k],i,j+1)
self.labels[k].setBuddy(self.fields[k])
self.registerField('%s'% k,self.fields[k],'timi')
if j == 0:
j=2
else:
j=0
i += 1
self.setLayout(layout)
class eidPage(QtGui.QWizardPage):
def __init__(self, parent=None):
super(eidPage, self).__init__(parent)
self.setButtonText(QtGui.QWizard.BackButton,u'< Πίσω')
self.setButtonText(QtGui.QWizard.NextButton,u'Επόμενο >')
self.setButtonText(QtGui.QWizard.CancelButton,u'Ακύρωση')
self.setTitle(u"Ειδικότητα Εργασίας")
self.setSubTitle(u"Παράρτημα απασχόλησης και ειδικότητα εργασίας")
#self.setPixmap(QtGui.QWizard.LogoPixmap, QtGui.QPixmap(':/logo1'))
self.labels = OrderedDict()
self.fields = OrderedDict()
self.labels['coy_id']= QtGui.QLabel(u"Περιοχή εργασίας:")
self.fields['coy_id'] = widgets.DbComboBox([[1,u'Κεντρικό'],])
self.labels['prod']= QtGui.QLabel(u"Ημ/νία Πρόσληψης:")
self.fields['prod'] = widgets.DbDateEdit()
self.labels['mereser']= QtGui.QLabel(u"Μέρες εργασίας:")
self.fields['mereser'] = widgets.WeekDays()
self.labels['dymmy0']= QtGui.QLabel(u"")
self.fields['dymmy0'] = QtGui.QLabel(u"")
self.labels['prIn']= QtGui.QLabel(u"Προσέλευση:")
self.fields['prIn'] = QtGui.QTimeEdit(self)
self.fields['prIn'].setDisplayFormat("HH:mm")
self.labels['prOut']= QtGui.QLabel(u"Αποχώρηση:")
self.fields['prOut'] = QtGui.QTimeEdit(self)
self.fields['prOut'].setDisplayFormat("HH:mm")
self.labels['diIn']= QtGui.QLabel(u"Διάλειμμα από:")
self.fields['diIn'] = QtGui.QTimeEdit(self)
self.fields['diIn'].setDisplayFormat("HH:mm")
self.labels['diOut']= QtGui.QLabel(u"Διάλειμμα έως:")
self.fields['diOut'] = QtGui.QTimeEdit(self)
self.fields['diOut'].setDisplayFormat("HH:mm")
self.labels['apType']= QtGui.QLabel(u"Τύπος απασχόλησης:")
self.fields['apType'] = widgets.DbComboBox([[1,u'Πλήρης απασχόληση'],[2,u'Μερική απασχόληση']])
self.labels['apdiar']= QtGui.QLabel(u"Διάρκεια απασχόλησης:")
self.fields['apdiar'] = widgets.DbComboBox([[1,u'Αορίστου χρόνου'],[2,u'Ορισμένου χρόνου']])
self.labels['eid_id']= QtGui.QLabel(u"Ειδικότητα:")
self.fields['eid_id'] = widgets.ButtonLineEdit('SELECT id,eidp FROM m12_eid',u'aa|Ειδικότητα',parent.db)
self.labels['proy']= QtGui.QLabel(u"Προυπηρεσία")
self.fields['proy'] = widgets.DbSpinBox()
self.labels['aptyp_id']= QtGui.QLabel(u"Τύπος αποδοχών:")
self.fields['aptyp_id'] = widgets.DbComboBox([[1,u'Μισθός'],[2,u'Ημερομίσθιο'],[3,u'Ωρομίσθιο']])
self.labels['apod']= QtGui.QLabel(u"Αποδοχές:")
self.fields['apod'] = widgets.DbDoubleSpinBox()
layout = QtGui.QGridLayout()
i = j = 0
for k in self.labels:
layout.addWidget(self.labels[k],i,j+0)
layout.addWidget(self.fields[k],i,j+1)
self.labels[k].setBuddy(self.fields[k])
self.registerField(k,self.fields[k],'timi')
if j == 0:
j=2
else:
j=0
i += 1
self.setLayout(layout)
class finalPage(QtGui.QWizardPage):
def __init__(self, parent=None):
super(finalPage, self).__init__(parent)
self.setButtonText(QtGui.QWizard.BackButton,u'< Πίσω')
self.setButtonText(QtGui.QWizard.FinishButton,u'Ολοκλήρωση')
self.setButtonText(QtGui.QWizard.CancelButton,u'Ακύρωση')
self.setTitle(u"Ολοκλήρωση πρόσληψης εργαζομένου ")
#self.setPixmap(QtGui.QWizard.WatermarkPixmap, QtGui.QPixmap(':/watermark2'))
self.label = QtGui.QLabel()
self.label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.label)
self.setLayout(layout)
def initializePage(self):
finishText = self.wizard().buttonText(QtGui.QWizard.FinishButton)
finishText.replace('&', '')
txt = u'<h3>Προσοχή η διαδικασία θα ολοκληρωθεί με τα παρακάτω δεδομένα :</h3>'
txt += u'Επώνυμο : <b>{0}</b> '.format(self.field('epon'))
txt += u'Όνομα : <b>%s</b> <br>' % self.field('onom')
txt += u'Πατρώνυμο : <b>%s</b> ' % self.field('patr')
txt += u'Μητρώνυμο : <b>%s</b><br>' % self.field('mitr')
txt += u'Φύλο : <b>%s</b>\n' % self.field('sex_id')
txt += u'Ημερομηνία Γέννησης : <b>%s</b><br>' % self.field('igen')
txt += u'ΑΦΜ : <b>%s</b><br>' % self.field('afm')
txt += u'AMKA : <b>%s</b><br>' % self.field('amka')
txt += u'ΑΜ.ΙΚΑ : <b>%s</b>' % self.field('aika')
txt += u'Πόλη : <b>%s</b><br>' % self.field('pol')
txt += u'Οδός : <b>%s</b>' % self.field('odo')
        txt += u'Αριθμός : <b>%s</b><br>' % self.field('num')
txt += u'ΤΚ : <b>%s</b><br>' % self.field('tk')
txt += u'Μέρες εργασίας : <b>%s</b>' % self.field('mereser')
txt += u'Αποδοχές : <b>%s</b>' % self.field('apod')
self.label.setText(txt)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
wizard = NewEmpWizard()
wizard.show()
sys.exit(app.exec_())
| tedlaz/pyted | misthodosia/m13a/f_newEmployeeWizard.py | Python | gpl-3.0 | 13,202 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
from hashlib import sha1
from pants.base.build_environment import get_buildroot
from pants.option.custom_types import dict_with_files_option, file_option, target_option
def stable_json_dumps(obj):
return json.dumps(obj, ensure_ascii=True, allow_nan=False, sort_keys=True)
def stable_json_sha1(obj):
return sha1(stable_json_dumps(obj)).hexdigest()
class OptionsFingerprinter(object):
"""Handles fingerprinting options under a given build_graph.
:API: public
"""
def __init__(self, build_graph):
self._build_graph = build_graph
def fingerprint(self, option_type, option_val):
"""Returns a hash of the given option_val based on the option_type.
:API: public
Returns None if option_val is None.
"""
if option_val is None:
return None
# For simplicity, we always fingerprint a list. For non-list-valued options,
# this will be a singleton list.
if not isinstance(option_val, (list, tuple)):
option_val = [option_val]
if option_type == target_option:
return self._fingerprint_target_specs(option_val)
elif option_type == file_option:
return self._fingerprint_files(option_val)
elif option_type == dict_with_files_option:
return self._fingerprint_dict_with_files(option_val)
else:
return self._fingerprint_primitives(option_val)
def _fingerprint_target_specs(self, specs):
"""Returns a fingerprint of the targets resolved from given target specs."""
hasher = sha1()
for spec in sorted(specs):
for target in sorted(self._build_graph.resolve(spec)):
# Not all targets have hashes; in particular, `Dependencies` targets don't.
h = target.compute_invalidation_hash()
if h:
hasher.update(h)
return hasher.hexdigest()
def _assert_in_buildroot(self, filepath):
"""Raises an error if the given filepath isn't in the buildroot.
Returns the normalized, absolute form of the path.
"""
filepath = os.path.normpath(filepath)
root = get_buildroot()
if not os.path.abspath(filepath) == filepath:
# If not absolute, assume relative to the build root.
return os.path.join(root, filepath)
else:
if '..' in os.path.relpath(filepath, root).split(os.path.sep):
        # The path wasn't in the buildroot. This is an error because it violates
        # the hermeticity of the build.
raise ValueError('Received a file_option that was not inside the build root:\n'
' file_option: {filepath}\n'
' build_root: {buildroot}\n'
.format(filepath=filepath, buildroot=root))
return filepath
def _fingerprint_files(self, filepaths):
"""Returns a fingerprint of the given filepaths and their contents.
This assumes the files are small enough to be read into memory.
"""
hasher = sha1()
# Note that we don't sort the filepaths, as their order may have meaning.
for filepath in filepaths:
filepath = self._assert_in_buildroot(filepath)
hasher.update(os.path.relpath(filepath, get_buildroot()))
with open(filepath, 'rb') as f:
hasher.update(f.read())
return hasher.hexdigest()
def _fingerprint_primitives(self, val):
return stable_json_sha1(val)
def _fingerprint_dict_with_files(self, option_val):
"""Returns a fingerprint of the given dictionary containing file paths.
Any value which is a file path which exists on disk will be fingerprinted by that file's
contents rather than by its path.
This assumes the files are small enough to be read into memory.
"""
# Dicts are wrapped in singleton lists. See the "For simplicity..." comment in `fingerprint()`.
option_val = option_val[0]
return stable_json_sha1({k: self._expand_possible_file_value(v) for k, v in option_val.items()})
def _expand_possible_file_value(self, value):
"""If the value is a file, returns its contents. Otherwise return the original value."""
if value and os.path.isfile(str(value)):
with open(value, 'r') as f:
return f.read()
return value
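# Illustrative sketch (not part of the original module): fingerprinting option
# values of different registered types; 'build_graph' is assumed to be an
# already-populated BuildGraph instance.
#
#   fingerprinter = OptionsFingerprinter(build_graph)
#   fingerprinter.fingerprint(file_option, ['pants.ini'])             # by file contents
#   fingerprinter.fingerprint(target_option, ['src/python/foo:bar'])  # by resolved targets
#   fingerprinter.fingerprint(str, ['--level=debug'])                 # primitives -> stable JSON sha1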
| pombredanne/pants | src/python/pants/option/options_fingerprinter.py | Python | apache-2.0 | 4,437 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Object relational mapping to database (postgresql) module
* Hierarchical structure
* Constraints consistency, validations
    * Object metadata depends on its status
    * Optimised processing by complex query (multiple actions at once)
    * Default field values
    * Permissions optimisation
    * Persistent object: DB postgresql
    * Data conversions
    * Multi-level caching system
    * 2 different inheritance mechanisms
    * Fields:
        - classical (varchar, integer, boolean, ...)
- relations (one2many, many2one, many2many)
- functions
"""
import calendar
import collections
import copy
import datetime
import itertools
import logging
import operator
import pickle
import pytz
import re
import simplejson
import time
import traceback
import types
import babel.dates
import dateutil.relativedelta
import psycopg2
from lxml import etree
import fields
import openerp
import openerp.tools as tools
from openerp.tools.config import config
from openerp.tools.misc import CountingStream, DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from query import Query
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__ + '.schema')
# List of etree._Element subclasses that we choose to ignore when parsing XML.
from openerp.tools import SKIPPED_ELEMENT_TYPES
regex_order = re.compile('^( *([a-z0-9:_]+|"[a-z0-9:_]+")( *desc| *asc)?( *, *|))+$', re.I)
regex_object_name = re.compile(r'^[a-z0-9_.]+$')
AUTOINIT_RECALCULATE_STORED_FIELDS = 1000
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
for state, modifs in (field.get("states",{})).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['tree_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``tree_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``tree_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
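# Illustrative use of setup_modifiers (hypothetical field node, shown only as a
# sketch of the expected call pattern; the result mirrors modifiers_tests below):
#   node = etree.fromstring('<field name="state" invisible="1"/>')
#   setup_modifiers(node)
#   node.get('modifiers')   # -> '{"invisible": true}'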
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, basestring):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', '{}')
test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
test_modifiers('<field name="a" required="1"/>', '{"required": true}')
test_modifiers('<field name="a" invisible="0"/>', '{}')
test_modifiers('<field name="a" readonly="0"/>', '{}')
test_modifiers('<field name="a" required="0"/>', '{}')
test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, '{}')
test_modifiers({"invisible": True}, '{"invisible": true}')
test_modifiers({"invisible": False}, '{}')
def check_object_name(name):
""" Check if the given name is a valid openerp object name.
The _name attribute in osv and osv_memory object is subject to
    some restrictions. This function returns True or False depending on
    whether the given name is allowed or not.
    TODO: this is an approximation. The goal of this approximation
    is to disallow uppercase characters (in some places we quote
    table/column names and in others not, which leads to this kind
    of error:
psycopg2.ProgrammingError: relation "xxx" does not exist).
The same restriction should apply to both osv and osv_memory
objects for consistency.
"""
if regex_object_name.match(name) is None:
return False
return True
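# A quick sketch of what check_object_name accepts (based on the regex above;
# the model names are illustrative only):
#   check_object_name('res.partner')      # True
#   check_object_name('sale_order_line')  # True
#   check_object_name('Res.Partner')      # False (uppercase is rejected)
#   check_object_name('res partner')      # False (spaces are rejected)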
def raise_on_invalid_object_name(name):
if not check_object_name(name):
msg = "The _name attribute %s is not valid." % name
_logger.error(msg)
raise except_orm('ValueError', msg)
POSTGRES_CONFDELTYPES = {
'RESTRICT': 'r',
'NO ACTION': 'a',
'CASCADE': 'c',
'SET NULL': 'n',
'SET DEFAULT': 'd',
}
def intersect(la, lb):
return filter(lambda x: x in lb, la)
def fix_import_export_id_paths(fieldname):
"""
Fixes the id fields in import and exports, and splits field paths
on '/'.
:param str fieldname: name of the field to import/export
:return: split field name
:rtype: list of str
"""
fixed_db_id = re.sub(r'([^/])\.id', r'\1/.id', fieldname)
fixed_external_id = re.sub(r'([^/]):id', r'\1/id', fixed_db_id)
return fixed_external_id.split('/')
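# Illustrative examples of the path fixing above (behaviour inferred from the
# regexes; field names are hypothetical):
#   fix_import_export_id_paths('name')            # -> ['name']
#   fix_import_export_id_paths('partner_id/id')   # -> ['partner_id', 'id']
#   fix_import_export_id_paths('partner_id:id')   # -> ['partner_id', 'id']
#   fix_import_export_id_paths('order_line/.id')  # -> ['order_line', '.id']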
class except_orm(Exception):
def __init__(self, name, value):
self.name = name
self.value = value
self.args = (name, value)
class BrowseRecordError(Exception):
pass
class browse_null(object):
""" Readonly python database object browser
"""
def __init__(self):
self.id = False
def __getitem__(self, name):
return None
def __getattr__(self, name):
return None # XXX: return self ?
def __int__(self):
return False
def __str__(self):
return ''
def __nonzero__(self):
return False
def __unicode__(self):
return u''
def __iter__(self):
raise NotImplementedError("Iteration is not allowed on %s" % self)
#
# TODO: execute an object method on browse_record_list
#
class browse_record_list(list):
""" Collection of browse objects
Such an instance will be returned when doing a ``browse([ids..])``
and will be iterable, yielding browse() objects
"""
def __init__(self, lst, context=None):
if not context:
context = {}
super(browse_record_list, self).__init__(lst)
self.context = context
class browse_record(object):
""" An object that behaves like a row of an object's table.
It has attributes after the columns of the corresponding object.
Examples::
uobj = pool.get('res.users')
user_rec = uobj.browse(cr, uid, 104)
name = user_rec.name
"""
def __init__(self, cr, uid, id, table, cache, context=None,
list_class=browse_record_list, fields_process=None):
"""
:param table: the browsed object (inherited from orm)
:param dict cache: a dictionary of model->field->data to be shared
across browse objects, thus reducing the SQL
read()s. It can speed up things a lot, but also be
disastrous if not discarded after write()/unlink()
operations
:param dict context: dictionary with an optional context
"""
if fields_process is None:
fields_process = {}
if context is None:
context = {}
self._list_class = list_class
self._cr = cr
self._uid = uid
self._id = id
self._table = table # deprecated, use _model!
self._model = table
self._table_name = self._table._name
self.__logger = logging.getLogger('openerp.osv.orm.browse_record.' + self._table_name)
self._context = context
self._fields_process = fields_process
cache.setdefault(table._name, {})
self._data = cache[table._name]
# if not (id and isinstance(id, (int, long,))):
# raise BrowseRecordError(_('Wrong ID for the browse record, got %r, expected an integer.') % (id,))
# if not table.exists(cr, uid, id, context):
# raise BrowseRecordError(_('Object %s does not exists') % (self,))
if id not in self._data:
self._data[id] = {'id': id}
self._cache = cache
def __getitem__(self, name):
if name == 'id':
return self._id
if name not in self._data[self._id]:
# build the list of fields we will fetch
# fetch the definition of the field which was asked for
if name in self._table._columns:
col = self._table._columns[name]
elif name in self._table._inherit_fields:
col = self._table._inherit_fields[name][2]
elif hasattr(self._table, str(name)):
attr = getattr(self._table, name)
if isinstance(attr, (types.MethodType, types.LambdaType, types.FunctionType)):
def function_proxy(*args, **kwargs):
if 'context' not in kwargs and self._context:
kwargs.update(context=self._context)
return attr(self._cr, self._uid, [self._id], *args, **kwargs)
return function_proxy
else:
return attr
else:
error_msg = "Field '%s' does not exist in object '%s'" % (name, self)
self.__logger.warning(error_msg)
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug(''.join(traceback.format_stack()))
raise KeyError(error_msg)
prefetchable = lambda f: f._classic_write and f._prefetch and not f.groups and not f.deprecated
# if the field is a classic one or a many2one, we'll fetch all classic and many2one fields
if prefetchable(col):
# gen the list of "local" (ie not inherited) fields which are classic or many2one
field_filter = lambda x: prefetchable(x[1])
fields_to_fetch = filter(field_filter, self._table._columns.items())
# gen the list of inherited fields
inherits = map(lambda x: (x[0], x[1][2]), self._table._inherit_fields.items())
# complete the field list with the inherited fields which are classic or many2one
fields_to_fetch += filter(field_filter, inherits)
# otherwise we fetch only that field
else:
fields_to_fetch = [(name, col)]
ids = filter(lambda id: name not in self._data[id], self._data.keys())
# read the results
field_names = map(lambda x: x[0], fields_to_fetch)
try:
field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
except (openerp.exceptions.AccessError, except_orm):
if len(ids) == 1:
raise
# prefetching attempt failed, perhaps we're violating ACL restrictions involuntarily
_logger.info('Prefetching attempt for fields %s on %s failed for ids %s, re-trying just for id %s', field_names, self._model._name, ids, self._id)
ids = [self._id]
field_values = self._table.read(self._cr, self._uid, ids, field_names, context=self._context, load="_classic_write")
# TODO: improve this, very slow for reports
if self._fields_process:
lang = self._context.get('lang', 'en_US') or 'en_US'
lang_obj_ids = self.pool.get('res.lang').search(self._cr, self._uid, [('code', '=', lang)])
if not lang_obj_ids:
raise Exception(_('Language with code "%s" is not defined in your system !\nDefine it through the Administration menu.') % (lang,))
lang_obj = self.pool.get('res.lang').browse(self._cr, self._uid, lang_obj_ids[0])
for field_name, field_column in fields_to_fetch:
if field_column._type in self._fields_process:
for result_line in field_values:
result_line[field_name] = self._fields_process[field_column._type](result_line[field_name])
if result_line[field_name]:
result_line[field_name].set_value(self._cr, self._uid, result_line[field_name], self, field_column, lang_obj)
if not field_values:
                # Where did those ids come from? Perhaps old entries in ir_model_data?
_logger.warning("No field_values found for ids %s in %s", ids, self)
raise KeyError('Field %s not found in %s'%(name, self))
# create browse records for 'remote' objects
for result_line in field_values:
new_data = {}
for field_name, field_column in fields_to_fetch:
if field_column._type == 'many2one':
if result_line[field_name]:
obj = self._table.pool[field_column._obj]
if isinstance(result_line[field_name], (list, tuple)):
value = result_line[field_name][0]
else:
value = result_line[field_name]
if value:
                                # FIXME: this happens when an _inherits object
                                #        overwrites a field of its parent. Needs
                                #        testing to be sure we get the right
                                #        object and not the parent one.
if not isinstance(value, browse_record):
if obj is None:
                                        # In some cases the target model is not available yet, so we must
                                        # ignore it; this is safe in most cases, as the value will just be
                                        # loaded later when needed. This situation can be caused by custom
                                        # fields that connect objects with m2o without respecting module
                                        # dependencies, causing relationships to be connected too soon,
                                        # when the target is not loaded yet.
continue
new_data[field_name] = browse_record(self._cr,
self._uid, value, obj, self._cache,
context=self._context,
list_class=self._list_class,
fields_process=self._fields_process)
else:
new_data[field_name] = value
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = browse_null()
elif field_column._type in ('one2many', 'many2many') and len(result_line[field_name]):
new_data[field_name] = self._list_class(
(browse_record(self._cr, self._uid, id, self._table.pool.get(field_column._obj),
self._cache, context=self._context, list_class=self._list_class,
fields_process=self._fields_process)
for id in result_line[field_name]),
context=self._context)
elif field_column._type == 'reference':
if result_line[field_name]:
if isinstance(result_line[field_name], browse_record):
new_data[field_name] = result_line[field_name]
else:
ref_obj, ref_id = result_line[field_name].split(',')
ref_id = long(ref_id)
if ref_id:
obj = self._table.pool[ref_obj]
new_data[field_name] = browse_record(self._cr, self._uid, ref_id, obj, self._cache, context=self._context, list_class=self._list_class, fields_process=self._fields_process)
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = browse_null()
else:
new_data[field_name] = result_line[field_name]
self._data[result_line['id']].update(new_data)
if not name in self._data[self._id]:
# How did this happen? Could be a missing model due to custom fields used too soon, see above.
self.__logger.error("Fields to fetch: %s, Field values: %s", field_names, field_values)
self.__logger.error("Cached: %s, Table: %s", self._data[self._id], self._table)
raise KeyError(_('Unknown attribute %s in %s ') % (name, self))
return self._data[self._id][name]
def __getattr__(self, name):
try:
return self[name]
except KeyError, e:
import sys
exc_info = sys.exc_info()
raise AttributeError, "Got %r while trying to get attribute %s on a %s record." % (e, name, self._table._name), exc_info[2]
def __contains__(self, name):
return (name in self._table._columns) or (name in self._table._inherit_fields) or hasattr(self._table, name)
def __iter__(self):
raise NotImplementedError("Iteration is not allowed on %s" % self)
def __hasattr__(self, name):
return name in self
def __int__(self):
return self._id
def __str__(self):
return "browse_record(%s, %s)" % (self._table_name, self._id)
def __eq__(self, other):
if not isinstance(other, browse_record):
return False
return (self._table_name, self._id) == (other._table_name, other._id)
def __ne__(self, other):
if not isinstance(other, browse_record):
return True
return (self._table_name, self._id) != (other._table_name, other._id)
# we need to define __unicode__ even though we've already defined __str__
# because we have overridden __getattr__
def __unicode__(self):
return unicode(str(self))
def __hash__(self):
return hash((self._table_name, self._id))
__repr__ = __str__
def refresh(self):
"""Force refreshing this browse_record's data and all the data of the
records that belong to the same cache, by emptying the cache completely,
preserving only the record identifiers (for prefetching optimizations).
"""
for model, model_cache in self._cache.iteritems():
# only preserve the ids of the records that were in the cache
cached_ids = dict([(i, {'id': i}) for i in model_cache.keys()])
self._cache[model].clear()
self._cache[model].update(cached_ids)
def pg_varchar(size=0):
""" Returns the VARCHAR declaration for the provided size:
* If no size (or an empty or negative size is provided) return an
'infinite' VARCHAR
* Otherwise return a VARCHAR(n)
:type int size: varchar size, optional
:rtype: str
"""
if size:
if not isinstance(size, int):
raise TypeError("VARCHAR parameter should be an int, got %s"
% type(size))
if size > 0:
return 'VARCHAR(%d)' % size
return 'VARCHAR'
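# For reference, the declarations produced by pg_varchar (illustrative only):
#   pg_varchar()     # -> 'VARCHAR'
#   pg_varchar(0)    # -> 'VARCHAR'
#   pg_varchar(64)   # -> 'VARCHAR(64)'
#   pg_varchar(-1)   # -> 'VARCHAR'  (negative sizes fall back to unbounded)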
FIELDS_TO_PGTYPES = {
fields.boolean: 'bool',
fields.integer: 'int4',
fields.text: 'text',
fields.html: 'text',
fields.date: 'date',
fields.datetime: 'timestamp',
fields.binary: 'bytea',
fields.many2one: 'int4',
fields.serialized: 'text',
}
def get_pg_type(f, type_override=None):
"""
:param fields._column f: field to get a Postgres type for
:param type type_override: use the provided type for dispatching instead of the field's own type
:returns: (postgres_identification_type, postgres_type_specification)
:rtype: (str, str)
"""
field_type = type_override or type(f)
if field_type in FIELDS_TO_PGTYPES:
pg_type = (FIELDS_TO_PGTYPES[field_type], FIELDS_TO_PGTYPES[field_type])
elif issubclass(field_type, fields.float):
if f.digits:
pg_type = ('numeric', 'NUMERIC')
else:
pg_type = ('float8', 'DOUBLE PRECISION')
elif issubclass(field_type, (fields.char, fields.reference)):
pg_type = ('varchar', pg_varchar(f.size))
elif issubclass(field_type, fields.selection):
if (isinstance(f.selection, list) and isinstance(f.selection[0][0], int))\
or getattr(f, 'size', None) == -1:
pg_type = ('int4', 'INTEGER')
else:
pg_type = ('varchar', pg_varchar(getattr(f, 'size', None)))
elif issubclass(field_type, fields.function):
if f._type == 'selection':
pg_type = ('varchar', pg_varchar())
else:
pg_type = get_pg_type(f, getattr(fields, f._type))
else:
_logger.warning('%s type not supported!', field_type)
pg_type = None
return pg_type
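# A rough sketch of typical results (the field instances below are hypothetical
# examples, not taken from any particular model):
#   get_pg_type(fields.char(size=32))          # -> ('varchar', 'VARCHAR(32)')
#   get_pg_type(fields.boolean())              # -> ('bool', 'bool')
#   get_pg_type(fields.float(digits=(16, 2)))  # -> ('numeric', 'NUMERIC')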
class MetaModel(type):
""" Metaclass for the Model.
This class is used as the metaclass for the Model class to discover
    the models defined in a module (i.e. without instantiating them).
If the automatic discovery is not needed, it is possible to set the
model's _register attribute to False.
"""
module_to_models = {}
def __init__(self, name, bases, attrs):
if not self._register:
self._register = True
super(MetaModel, self).__init__(name, bases, attrs)
return
# The (OpenERP) module name can be in the `openerp.addons` namespace
# or not. For instance module `sale` can be imported as
# `openerp.addons.sale` (the good way) or `sale` (for backward
# compatibility).
module_parts = self.__module__.split('.')
if len(module_parts) > 2 and module_parts[0] == 'openerp' and \
module_parts[1] == 'addons':
module_name = self.__module__.split('.')[2]
else:
module_name = self.__module__.split('.')[0]
if not hasattr(self, '_module'):
self._module = module_name
        # Remember which models to instantiate for this module.
if not self._custom:
self.module_to_models.setdefault(self._module, []).append(self)
# Definition of log access columns, automatically added to models if
# self._log_access is True
LOG_ACCESS_COLUMNS = {
'create_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
'create_date': 'TIMESTAMP',
'write_uid': 'INTEGER REFERENCES res_users ON DELETE SET NULL',
'write_date': 'TIMESTAMP'
}
# special columns automatically created by the ORM
MAGIC_COLUMNS = ['id'] + LOG_ACCESS_COLUMNS.keys()
class BaseModel(object):
""" Base class for OpenERP models.
OpenERP models are created by inheriting from this class' subclasses:
* Model: for regular database-persisted models
* TransientModel: for temporary data, stored in the database but automatically
        vacuumed every so often
* AbstractModel: for abstract super classes meant to be shared by multiple
_inheriting classes (usually Models or TransientModels)
The system will later instantiate the class once per database (on
which the class' module is installed).
To create a class that should not be instantiated, the _register class attribute
may be set to False.
"""
__metaclass__ = MetaModel
_auto = True # create database backend
_register = False # Set to false if the model shouldn't be automatically discovered.
_name = None
_columns = {}
_constraints = []
_custom = False
_defaults = {}
_rec_name = None
_parent_name = 'parent_id'
_parent_store = False
_parent_order = False
_date_name = 'date'
_order = 'id'
_sequence = None
_description = None
_needaction = False
# dict of {field:method}, with method returning the (name_get of records, {id: fold})
# to include in the _read_group, if grouped on this field
_group_by_full = {}
# Transience
_transient = False # True in a TransientModel
# structure:
# { 'parent_model': 'm2o_field', ... }
_inherits = {}
    # Mapping from inherits'd field name to quadruple (m, r, f, n) where m is the
# model from which it is inherits'd, r is the (local) field towards m, f
# is the _column object itself, and n is the original (i.e. top-most)
# parent model.
# Example:
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
    #                        field_column_obj, original_parent_model), ... }
_inherit_fields = {}
# Mapping field name/column_info object
# This is similar to _inherit_fields but:
# 1. includes self fields,
    # 2. uses column_info instead of a raw tuple.
_all_columns = {}
_table = None
_log_create = False
_sql_constraints = []
_protected = ['read', 'write', 'create', 'default_get', 'perm_read', 'unlink', 'fields_get', 'fields_view_get', 'search', 'name_get', 'distinct_field_get', 'name_search', 'copy', 'import_data', 'search_count', 'exists']
CONCURRENCY_CHECK_FIELD = '__last_update'
def log(self, cr, uid, id, message, secondary=False, context=None):
return _logger.warning("log() is deprecated. Please use OpenChatter notification system instead of the res.log mechanism.")
def view_init(self, cr, uid, fields_list, context=None):
"""Override this method to do specific things when a view on the object is opened."""
pass
def _field_create(self, cr, context=None):
""" Create entries in ir_model_fields for all the model's fields.
If necessary, also create an entry in ir_model, and if called from the
modules loading scheme (by receiving 'module' in the context), also
create entries in ir_model_data (for the model and the fields).
- create an entry in ir_model (if there is not already one),
- create an entry in ir_model_data (if there is not already one, and if
'module' is in the context),
- update ir_model_fields with the fields found in _columns
(TODO there is some redundancy as _columns is updated from
ir_model_fields in __init__).
"""
if context is None:
context = {}
cr.execute("SELECT id FROM ir_model WHERE model=%s", (self._name,))
if not cr.rowcount:
cr.execute('SELECT nextval(%s)', ('ir_model_id_seq',))
model_id = cr.fetchone()[0]
cr.execute("INSERT INTO ir_model (id,model, name, info,state) VALUES (%s, %s, %s, %s, %s)", (model_id, self._name, self._description, self.__doc__, 'base'))
else:
model_id = cr.fetchone()[0]
if 'module' in context:
name_id = 'model_'+self._name.replace('.', '_')
cr.execute('select * from ir_model_data where name=%s and module=%s', (name_id, context['module']))
if not cr.rowcount:
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name_id, context['module'], 'ir.model', model_id)
)
cr.execute("SELECT * FROM ir_model_fields WHERE model=%s", (self._name,))
cols = {}
for rec in cr.dictfetchall():
cols[rec['name']] = rec
ir_model_fields_obj = self.pool.get('ir.model.fields')
        # sparse fields should be created at the end, as they depend on their serialization field already existing
model_fields = sorted(self._columns.items(), key=lambda x: 1 if x[1]._type == 'sparse' else 0)
for (k, f) in model_fields:
vals = {
'model_id': model_id,
'model': self._name,
'name': k,
'field_description': f.string,
'ttype': f._type,
'relation': f._obj or '',
'select_level': tools.ustr(f.select or 0),
'readonly': (f.readonly and 1) or 0,
'required': (f.required and 1) or 0,
'selectable': (f.selectable and 1) or 0,
'translate': (f.translate and 1) or 0,
'relation_field': f._fields_id if isinstance(f, fields.one2many) else '',
'serialization_field_id': None,
}
if getattr(f, 'serialization_field', None):
# resolve link to serialization_field if specified by name
serialization_field_id = ir_model_fields_obj.search(cr, SUPERUSER_ID, [('model','=',vals['model']), ('name', '=', f.serialization_field)])
if not serialization_field_id:
raise except_orm(_('Error'), _("Serialization field `%s` not found for sparse field `%s`!") % (f.serialization_field, k))
vals['serialization_field_id'] = serialization_field_id[0]
            # When it's a custom field, it does not contain f.select
if context.get('field_state', 'base') == 'manual':
if context.get('field_name', '') == k:
vals['select_level'] = context.get('select', '0')
                # set the value so the problem does NOT occur next time
elif k in cols:
vals['select_level'] = cols[k]['select_level']
if k not in cols:
cr.execute('select nextval(%s)', ('ir_model_fields_id_seq',))
id = cr.fetchone()[0]
vals['id'] = id
cr.execute("""INSERT INTO ir_model_fields (
id, model_id, model, name, field_description, ttype,
relation,state,select_level,relation_field, translate, serialization_field_id
) VALUES (
%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s
)""", (
id, vals['model_id'], vals['model'], vals['name'], vals['field_description'], vals['ttype'],
vals['relation'], 'base',
vals['select_level'], vals['relation_field'], bool(vals['translate']), vals['serialization_field_id']
))
if 'module' in context:
name1 = 'field_' + self._table + '_' + k
cr.execute("select name from ir_model_data where name=%s", (name1,))
if cr.fetchone():
name1 = name1 + "_" + str(id)
cr.execute("INSERT INTO ir_model_data (name,date_init,date_update,module,model,res_id) VALUES (%s, (now() at time zone 'UTC'), (now() at time zone 'UTC'), %s, %s, %s)", \
(name1, context['module'], 'ir.model.fields', id)
)
else:
for key, val in vals.items():
if cols[k][key] != vals[key]:
cr.execute('update ir_model_fields set field_description=%s where model=%s and name=%s', (vals['field_description'], vals['model'], vals['name']))
cr.execute("""UPDATE ir_model_fields SET
model_id=%s, field_description=%s, ttype=%s, relation=%s,
select_level=%s, readonly=%s ,required=%s, selectable=%s, relation_field=%s, translate=%s, serialization_field_id=%s
WHERE
model=%s AND name=%s""", (
vals['model_id'], vals['field_description'], vals['ttype'],
vals['relation'],
vals['select_level'], bool(vals['readonly']), bool(vals['required']), bool(vals['selectable']), vals['relation_field'], bool(vals['translate']), vals['serialization_field_id'], vals['model'], vals['name']
))
break
#
    # Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
@classmethod
def create_instance(cls, pool, cr):
""" Instanciate a given model.
This class method instanciates the class of some model (i.e. a class
deriving from osv or osv_memory). The class might be the class passed
in argument or, if it inherits from another class, a class constructed
by combining the two classes.
The ``attributes`` argument specifies which parent class attributes
have to be combined.
TODO: the creation of the combined class is repeated at each call of
this method. This is probably unnecessary.
"""
attributes = ['_columns', '_defaults', '_inherits', '_constraints',
'_sql_constraints']
parent_names = getattr(cls, '_inherit', None)
if parent_names:
if isinstance(parent_names, (str, unicode)):
name = cls._name or parent_names
parent_names = [parent_names]
else:
name = cls._name
if not name:
raise TypeError('_name is mandatory in case of multiple inheritance')
for parent_name in ((type(parent_names)==list) and parent_names or [parent_names]):
if parent_name not in pool:
                raise TypeError('The model "%s" specifies a nonexistent parent class "%s"\n'
'You may need to add a dependency on the parent class\' module.' % (name, parent_name))
parent_model = pool[parent_name]
if not getattr(cls, '_original_module', None) and name == parent_model._name:
cls._original_module = parent_model._original_module
parent_class = parent_model.__class__
nattr = {}
for s in attributes:
new = copy.copy(getattr(parent_model, s, {}))
if s == '_columns':
# Don't _inherit custom fields.
for c in new.keys():
if new[c].manual:
del new[c]
if hasattr(new, 'update'):
new.update(cls.__dict__.get(s, {}))
elif s=='_constraints':
for c in cls.__dict__.get(s, []):
exist = False
for c2 in range(len(new)):
                            # For _constraints, we should check fields and methods as well
if new[c2][2]==c[2] and (new[c2][0] == c[0] \
or getattr(new[c2][0],'__name__', True) == \
getattr(c[0],'__name__', False)):
# If new class defines a constraint with
# same function name, we let it override
# the old one.
new[c2] = c
exist = True
break
if not exist:
new.append(c)
else:
new.extend(cls.__dict__.get(s, []))
nattr[s] = new
# Keep links to non-inherited constraints, e.g. useful when exporting translations
nattr['_local_constraints'] = cls.__dict__.get('_constraints', [])
nattr['_local_sql_constraints'] = cls.__dict__.get('_sql_constraints', [])
cls = type(name, (cls, parent_class), dict(nattr, _register=False))
else:
cls._local_constraints = getattr(cls, '_constraints', [])
cls._local_sql_constraints = getattr(cls, '_sql_constraints', [])
if not getattr(cls, '_original_module', None):
cls._original_module = cls._module
obj = object.__new__(cls)
if hasattr(obj, '_columns'):
            # float fields are registry-dependent (digits attribute). Duplicate them to avoid issues.
for c, f in obj._columns.items():
if f._type == 'float':
obj._columns[c] = copy.copy(f)
obj.__init__(pool, cr)
return obj
def __new__(cls):
"""Register this model.
        This doesn't create an instance but simply registers the model
as being part of the module where it is defined.
"""
# Set the module name (e.g. base, sale, accounting, ...) on the class.
module = cls.__module__.split('.')[0]
if not hasattr(cls, '_module'):
cls._module = module
# Record this class in the list of models to instantiate for this module,
# managed by the metaclass.
module_model_list = MetaModel.module_to_models.setdefault(cls._module, [])
if cls not in module_model_list:
if not cls._custom:
module_model_list.append(cls)
# Since we don't return an instance here, the __init__
# method won't be called.
return None
def __init__(self, pool, cr):
""" Initialize a model and make it part of the given registry.
- copy the stored fields' functions in the osv_pool,
- update the _columns with the fields found in ir_model_fields,
- ensure there is a many2one for each _inherits'd parent,
- update the children's _columns,
- give a chance to each field to initialize itself.
"""
pool.add(self._name, self)
self.pool = pool
if not self._name and not hasattr(self, '_inherit'):
name = type(self).__name__.split('.')[0]
msg = "The class %s has to have a _name attribute" % name
_logger.error(msg)
raise except_orm('ValueError', msg)
if not self._description:
self._description = self._name
if not self._table:
self._table = self._name.replace('.', '_')
if not hasattr(self, '_log_access'):
# If _log_access is not specified, it is the same value as _auto.
self._log_access = getattr(self, "_auto", True)
self._columns = self._columns.copy()
for store_field in self._columns:
f = self._columns[store_field]
if hasattr(f, 'digits_change'):
f.digits_change(cr)
def not_this_field(stored_func):
x, y, z, e, f, l = stored_func
return x != self._name or y != store_field
self.pool._store_function[self._name] = filter(not_this_field, self.pool._store_function.get(self._name, []))
if not isinstance(f, fields.function):
continue
if not f.store:
continue
sm = f.store
if sm is True:
sm = {self._name: (lambda self, cr, uid, ids, c={}: ids, None, f.priority, None)}
for object, aa in sm.items():
if len(aa) == 4:
(fnct, fields2, order, length) = aa
elif len(aa) == 3:
(fnct, fields2, order) = aa
length = None
else:
raise except_orm('Error',
('Invalid function definition %s in object %s !\nYou must use the definition: store={object:(fnct, fields, priority, time length)}.' % (store_field, self._name)))
self.pool._store_function.setdefault(object, [])
t = (self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length)
if not t in self.pool._store_function[object]:
self.pool._store_function[object].append((self._name, store_field, fnct, tuple(fields2) if fields2 else None, order, length))
self.pool._store_function[object].sort(lambda x, y: cmp(x[4], y[4]))
for (key, _, msg) in self._sql_constraints:
self.pool._sql_error[self._table+'_'+key] = msg
# Load manual fields
        # Check whether the query is already done for all modules or if we need to
# do it ourselves.
if self.pool.fields_by_model is not None:
manual_fields = self.pool.fields_by_model.get(self._name, [])
else:
cr.execute('SELECT * FROM ir_model_fields WHERE model=%s AND state=%s', (self._name, 'manual'))
manual_fields = cr.dictfetchall()
for field in manual_fields:
if field['name'] in self._columns:
continue
attrs = {
'string': field['field_description'],
'required': bool(field['required']),
'readonly': bool(field['readonly']),
'domain': eval(field['domain']) if field['domain'] else None,
'size': field['size'] or None,
'ondelete': field['on_delete'],
'translate': (field['translate']),
'manual': True,
'_prefetch': False,
#'select': int(field['select_level'])
}
if field['serialization_field_id']:
cr.execute('SELECT name FROM ir_model_fields WHERE id=%s', (field['serialization_field_id'],))
attrs.update({'serialization_field': cr.fetchone()[0], 'type': field['ttype']})
if field['ttype'] in ['many2one', 'one2many', 'many2many']:
attrs.update({'relation': field['relation']})
self._columns[field['name']] = fields.sparse(**attrs)
elif field['ttype'] == 'selection':
self._columns[field['name']] = fields.selection(eval(field['selection']), **attrs)
elif field['ttype'] == 'reference':
self._columns[field['name']] = fields.reference(selection=eval(field['selection']), **attrs)
elif field['ttype'] == 'many2one':
self._columns[field['name']] = fields.many2one(field['relation'], **attrs)
elif field['ttype'] == 'one2many':
self._columns[field['name']] = fields.one2many(field['relation'], field['relation_field'], **attrs)
elif field['ttype'] == 'many2many':
_rel1 = field['relation'].replace('.', '_')
_rel2 = field['model'].replace('.', '_')
_rel_name = 'x_%s_%s_%s_rel' % (_rel1, _rel2, field['name'])
self._columns[field['name']] = fields.many2many(field['relation'], _rel_name, 'id1', 'id2', **attrs)
else:
self._columns[field['name']] = getattr(fields, field['ttype'])(**attrs)
self._inherits_check()
self._inherits_reload()
if not self._sequence:
self._sequence = self._table + '_id_seq'
for k in self._defaults:
assert (k in self._columns) or (k in self._inherit_fields), 'Default function defined in %s but field %s does not exist !' % (self._name, k,)
for f in self._columns:
self._columns[f].restart()
# Transience
if self.is_transient():
self._transient_check_count = 0
self._transient_max_count = config.get('osv_memory_count_limit')
self._transient_max_hours = config.get('osv_memory_age_limit')
assert self._log_access, "TransientModels must have log_access turned on, "\
"in order to implement their access rights policy"
# Validate rec_name
if self._rec_name is not None:
assert self._rec_name in self._all_columns.keys() + ['id'], "Invalid rec_name %s for model %s" % (self._rec_name, self._name)
else:
self._rec_name = 'name'
def __export_row(self, cr, uid, row, fields, raw_data=False, context=None):
if context is None:
context = {}
def check_type(field_type):
if field_type == 'float':
return 0.0
elif field_type == 'integer':
return 0
elif field_type == 'boolean':
return 'False'
return ''
def selection_field(in_field):
col_obj = self.pool[in_field.keys()[0]]
if f[i] in col_obj._columns.keys():
return col_obj._columns[f[i]]
elif f[i] in col_obj._inherits.keys():
                return selection_field(col_obj._inherits)
else:
return False
def _get_xml_id(self, cr, uid, r):
model_data = self.pool.get('ir.model.data')
data_ids = model_data.search(cr, uid, [('model', '=', r._model._name), ('res_id', '=', r['id'])])
if len(data_ids):
d = model_data.read(cr, uid, data_ids, ['name', 'module'])[0]
if d['module']:
r = '%s.%s' % (d['module'], d['name'])
else:
r = d['name']
else:
postfix = 0
while True:
n = r._model._table+'_'+str(r['id']) + (postfix and ('_'+str(postfix)) or '' )
if not model_data.search(cr, uid, [('name', '=', n)]):
break
postfix += 1
model_data.create(cr, SUPERUSER_ID, {
'name': n,
'model': r._model._name,
'res_id': r['id'],
'module': '__export__',
})
r = '__export__.'+n
return r
lines = []
data = map(lambda x: '', range(len(fields)))
done = []
for fpos in range(len(fields)):
f = fields[fpos]
if f:
r = row
i = 0
while i < len(f):
cols = False
if f[i] == '.id':
r = r['id']
elif f[i] == 'id':
r = _get_xml_id(self, cr, uid, r)
else:
r = r[f[i]]
                    # To display the external name of a selection field when it's exported
if f[i] in self._columns.keys():
cols = self._columns[f[i]]
elif f[i] in self._inherit_fields.keys():
cols = selection_field(self._inherits)
if cols and cols._type == 'selection':
sel_list = cols.selection
if r and type(sel_list) == type([]):
r = [x[1] for x in sel_list if r==x[0]]
r = r and r[0] or False
if not r:
if f[i] in self._columns:
r = check_type(self._columns[f[i]]._type)
elif f[i] in self._inherit_fields:
r = check_type(self._inherit_fields[f[i]][2]._type)
data[fpos] = r or False
break
if isinstance(r, (browse_record_list, list)):
first = True
fields2 = map(lambda x: (x[:i+1]==f[:i+1] and x[i+1:]) \
or [], fields)
if fields2 in done:
if [x for x in fields2 if x]:
break
done.append(fields2)
if cols and cols._type=='many2many' and len(fields[fpos])>(i+1) and (fields[fpos][i+1]=='id'):
data[fpos] = ','.join([_get_xml_id(self, cr, uid, x) for x in r])
break
for row2 in r:
lines2 = row2._model.__export_row(cr, uid, row2, fields2, context=context)
if first:
for fpos2 in range(len(fields)):
if lines2 and lines2[0][fpos2]:
data[fpos2] = lines2[0][fpos2]
if not data[fpos]:
dt = ''
for rr in r:
name_relation = self.pool[rr._table_name]._rec_name
if isinstance(rr[name_relation], browse_record):
rr = rr[name_relation]
rr_name = self.pool[rr._table_name].name_get(cr, uid, [rr.id], context=context)
rr_name = rr_name and rr_name[0] and rr_name[0][1] or ''
dt += tools.ustr(rr_name or '') + ','
data[fpos] = dt[:-1]
break
lines += lines2[1:]
first = False
else:
lines += lines2
break
i += 1
if i == len(f):
if isinstance(r, browse_record):
r = self.pool[r._table_name].name_get(cr, uid, [r.id], context=context)
r = r and r[0] and r[0][1] or ''
if raw_data and cols and cols._type in ('integer', 'boolean', 'float'):
data[fpos] = r
elif raw_data and cols and cols._type == 'date':
data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATE_FORMAT).date()
elif raw_data and cols and cols._type == 'datetime':
data[fpos] = datetime.datetime.strptime(r, tools.DEFAULT_SERVER_DATETIME_FORMAT)
else:
data[fpos] = tools.ustr(r or '')
return [data] + lines
def export_data(self, cr, uid, ids, fields_to_export, raw_data=False, context=None):
"""
Export fields for selected objects
:param cr: database cursor
:param uid: current user id
:param ids: list of ids
:param fields_to_export: list of fields
:param raw_data: True to return value in fields type, False for string values
:param context: context arguments, like lang, time zone
:rtype: dictionary with a *datas* matrix
This method is used when exporting data via client menu
"""
if context is None:
context = {}
cols = self._columns.copy()
for f in self._inherit_fields:
cols.update({f: self._inherit_fields[f][2]})
fields_to_export = map(fix_import_export_id_paths, fields_to_export)
datas = []
for row in self.browse(cr, uid, ids, context):
datas += self.__export_row(cr, uid, row, fields_to_export, raw_data=raw_data, context=context)
return {'datas': datas}
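    # Illustrative call (the model, ids and field names below are hypothetical):
    #   partner_obj = self.pool['res.partner']
    #   result = partner_obj.export_data(cr, uid, [1, 2],
    #                                    ['name', 'country_id/id'], context=context)
    #   # result['datas'] is a list of rows, one (or more) per exported record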
def import_data(self, cr, uid, fields, datas, mode='init', current_module='', noupdate=False, context=None, filename=None):
"""
.. deprecated:: 7.0
Use :meth:`~load` instead
Import given data in given module
This method is used when importing data via client menu.
Example of fields to import for a sale.order::
.id, (=database_id)
partner_id, (=name_search)
order_line/.id, (=database_id)
order_line/name,
order_line/product_id/id, (=xml id)
order_line/price_unit,
order_line/product_uom_qty,
order_line/product_uom/id (=xml_id)
This method returns a 4-tuple with the following structure::
(return_code, errored_resource, error_message, unused)
* The first item is a return code, it is ``-1`` in case of
import error, or the last imported row number in case of success
* The second item contains the record data dict that failed to import
in case of error, otherwise it's 0
* The third item contains an error message string in case of error,
otherwise it's 0
* The last item is currently unused, with no specific semantics
:param fields: list of fields to import
:param datas: data to import
:param mode: 'init' or 'update' for record creation
:param current_module: module name
:param noupdate: flag for record creation
:param filename: optional file to store partial import state for recovery
:returns: 4-tuple in the form (return_code, errored_resource, error_message, unused)
:rtype: (int, dict or 0, str or 0, str or 0)
"""
context = dict(context) if context is not None else {}
context['_import_current_module'] = current_module
fields = map(fix_import_export_id_paths, fields)
ir_model_data_obj = self.pool.get('ir.model.data')
def log(m):
if m['type'] == 'error':
raise Exception(m['message'])
if config.get('import_partial') and filename:
with open(config.get('import_partial'), 'rb') as partial_import_file:
data = pickle.load(partial_import_file)
position = data.get(filename, 0)
position = 0
try:
for res_id, xml_id, res, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, datas,
context=context, log=log),
context=context, log=log):
ir_model_data_obj._update(cr, uid, self._name,
current_module, res, mode=mode, xml_id=xml_id,
noupdate=noupdate, res_id=res_id, context=context)
position = info.get('rows', {}).get('to', 0) + 1
if config.get('import_partial') and filename and (not (position%100)):
with open(config.get('import_partial'), 'rb') as partial_import:
data = pickle.load(partial_import)
data[filename] = position
with open(config.get('import_partial'), 'wb') as partial_import:
pickle.dump(data, partial_import)
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
cr.commit()
except Exception, e:
cr.rollback()
return -1, {}, 'Line %d : %s' % (position + 1, tools.ustr(e)), ''
if context.get('defer_parent_store_computation'):
self._parent_store_compute(cr)
return position, 0, 0, 0
def load(self, cr, uid, fields, data, context=None):
"""
Attempts to load the data matrix, and returns a list of ids (or
``False`` if there was an error and no id could be generated) and a
list of messages.
The ids are those of the records created and saved (in database), in
the same order they were extracted from the file. They can be passed
directly to :meth:`~read`
:param fields: list of fields to import, at the same index as the corresponding data
:type fields: list(str)
:param data: row-major matrix of data to import
:type data: list(list(str))
:param dict context:
:returns: {ids: list(int)|False, messages: [Message]}
"""
cr.execute('SAVEPOINT model_load')
messages = []
fields = map(fix_import_export_id_paths, fields)
ModelData = self.pool['ir.model.data'].clear_caches()
fg = self.fields_get(cr, uid, context=context)
mode = 'init'
current_module = ''
noupdate = False
ids = []
for id, xid, record, info in self._convert_records(cr, uid,
self._extract_records(cr, uid, fields, data,
context=context, log=messages.append),
context=context, log=messages.append):
try:
cr.execute('SAVEPOINT model_load_save')
except psycopg2.InternalError, e:
# broken transaction, exit and hope the source error was
# already logged
if not any(message['type'] == 'error' for message in messages):
messages.append(dict(info, type='error',message=
u"Unknown database error: '%s'" % e))
break
try:
ids.append(ModelData._update(cr, uid, self._name,
current_module, record, mode=mode, xml_id=xid,
noupdate=noupdate, res_id=id, context=context))
cr.execute('RELEASE SAVEPOINT model_load_save')
except psycopg2.Warning, e:
messages.append(dict(info, type='warning', message=str(e)))
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
except psycopg2.Error, e:
messages.append(dict(
info, type='error',
**PGERROR_TO_OE[e.pgcode](self, fg, info, e)))
# Failed to write, log to messages, rollback savepoint (to
# avoid broken transaction) and keep going
cr.execute('ROLLBACK TO SAVEPOINT model_load_save')
if any(message['type'] == 'error' for message in messages):
cr.execute('ROLLBACK TO SAVEPOINT model_load')
ids = False
return {'ids': ids, 'messages': messages}
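    # A minimal sketch of how load() is typically invoked (column names and
    # values are hypothetical):
    #   model = self.pool['res.partner']
    #   result = model.load(cr, uid,
    #                       ['name', 'email'],
    #                       [['Alice', 'alice@example.com'],
    #                        ['Bob', 'bob@example.com']])
    #   # result == {'ids': [<new ids>] or False, 'messages': [...]}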
def _extract_records(self, cr, uid, fields_, data,
context=None, log=lambda a: None):
""" Generates record dicts from the data sequence.
The result is a generator of dicts mapping field names to raw
(unconverted, unvalidated) values.
For relational fields, if sub-fields were provided the value will be
a list of sub-records
The following sub-fields may be set on the record (by key):
* None is the name_get for the record (to use with name_create/name_search)
* "id" is the External ID for the record
* ".id" is the Database ID for the record
"""
columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
# Fake columns to avoid special cases in extractor
columns[None] = fields.char('rec_name')
columns['id'] = fields.char('External ID')
columns['.id'] = fields.integer('Database ID')
# m2o fields can't be on multiple lines so exclude them from the
# is_relational field rows filter, but special-case it later on to
# be handled with relational fields (as it can have subfields)
is_relational = lambda field: columns[field]._type in ('one2many', 'many2many', 'many2one')
get_o2m_values = itemgetter_tuple(
[index for index, field in enumerate(fields_)
if columns[field[0]]._type == 'one2many'])
get_nono2m_values = itemgetter_tuple(
[index for index, field in enumerate(fields_)
if columns[field[0]]._type != 'one2many'])
        # Checks whether the provided row carries only one2many values (and no
        # other non-empty values), i.e. it merely extends the previous record
def only_o2m_values(row, f=get_nono2m_values, g=get_o2m_values):
return any(g(row)) and not any(f(row))
index = 0
while True:
if index >= len(data): return
row = data[index]
# copy non-relational fields to record dict
record = dict((field[0], value)
for field, value in itertools.izip(fields_, row)
if not is_relational(field[0]))
# Get all following rows which have relational values attached to
# the current record (no non-relational values)
record_span = itertools.takewhile(
only_o2m_values, itertools.islice(data, index + 1, None))
# stitch record row back on for relational fields
record_span = list(itertools.chain([row], record_span))
for relfield in set(
field[0] for field in fields_
if is_relational(field[0])):
column = columns[relfield]
# FIXME: how to not use _obj without relying on fields_get?
Model = self.pool[column._obj]
# get only cells for this sub-field, should be strictly
# non-empty, field path [None] is for name_get column
indices, subfields = zip(*((index, field[1:] or [None])
for index, field in enumerate(fields_)
if field[0] == relfield))
# return all rows which have at least one value for the
# subfields of relfield
relfield_data = filter(any, map(itemgetter_tuple(indices), record_span))
record[relfield] = [subrecord
for subrecord, _subinfo in Model._extract_records(
cr, uid, subfields, relfield_data,
context=context, log=log)]
yield record, {'rows': {
'from': index,
'to': index + len(record_span) - 1
}}
index += len(record_span)
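    # Sketch of the grouping performed by _extract_records (hypothetical data,
    # shown for clarity only). With fields_ == [['name'], ['line_ids', 'name']],
    # where 'line_ids' is a one2many, and data rows:
    #     ['Order A', 'line 1']
    #     ['',        'line 2']
    #     ['Order B', 'line 3']
    # the generator yields roughly:
    #     ({'name': 'Order A', 'line_ids': [{'name': 'line 1'}, {'name': 'line 2'}]},
    #      {'rows': {'from': 0, 'to': 1}})
    #     ({'name': 'Order B', 'line_ids': [{'name': 'line 3'}]},
    #      {'rows': {'from': 2, 'to': 2}})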
def _convert_records(self, cr, uid, records,
context=None, log=lambda a: None):
""" Converts records from the source iterable (recursive dicts of
strings) into forms which can be written to the database (via
self.create or (ir.model.data)._update)
:returns: a list of triplets of (id, xid, record)
:rtype: list((int|None, str|None, dict))
"""
if context is None: context = {}
Converter = self.pool['ir.fields.converter']
columns = dict((k, v.column) for k, v in self._all_columns.iteritems())
Translation = self.pool['ir.translation']
field_names = dict(
(f, (Translation._get_source(cr, uid, self._name + ',' + f, 'field',
context.get('lang'))
or column.string))
for f, column in columns.iteritems())
convert = Converter.for_model(cr, uid, self, context=context)
def _log(base, field, exception):
type = 'warning' if isinstance(exception, Warning) else 'error'
# logs the logical (not human-readable) field name for automated
# processing of response, but injects human readable in message
record = dict(base, type=type, field=field,
message=unicode(exception.args[0]) % base)
if len(exception.args) > 1 and exception.args[1]:
record.update(exception.args[1])
log(record)
stream = CountingStream(records)
for record, extras in stream:
dbid = False
xid = False
# name_get/name_create
if None in record: pass
# xid
if 'id' in record:
xid = record['id']
# dbid
if '.id' in record:
try:
dbid = int(record['.id'])
except ValueError:
# in case of overridden id column
dbid = record['.id']
if not self.search(cr, uid, [('id', '=', dbid)], context=context):
log(dict(extras,
type='error',
record=stream.index,
field='.id',
message=_(u"Unknown database identifier '%s'") % dbid))
dbid = False
converted = convert(record, lambda field, err:\
_log(dict(extras, record=stream.index, field=field_names[field]), field, err))
yield dbid, xid, converted, dict(extras, record=stream.index)
def _validate(self, cr, uid, ids, context=None):
context = context or {}
lng = context.get('lang')
trans = self.pool.get('ir.translation')
error_msgs = []
for constraint in self._constraints:
fun, msg, fields = constraint
try:
# We don't pass around the context here: validation code
# must always yield the same results.
valid = fun(self, cr, uid, ids)
extra_error = None
except Exception, e:
_logger.debug('Exception while validating constraint', exc_info=True)
valid = False
extra_error = tools.ustr(e)
if not valid:
# Check presence of __call__ directly instead of using
# callable() because it will be deprecated as of Python 3.0
if hasattr(msg, '__call__'):
translated_msg = msg(self, cr, uid, ids, context=context)
if isinstance(translated_msg, tuple):
translated_msg = translated_msg[0] % translated_msg[1]
else:
translated_msg = trans._get_source(cr, uid, self._name, 'constraint', lng, msg)
if extra_error:
translated_msg += "\n\n%s\n%s" % (_('Error details:'), extra_error)
error_msgs.append(
_("The field(s) `%s` failed against a constraint: %s") % (', '.join(fields), translated_msg)
)
if error_msgs:
raise except_orm('ValidateError', '\n'.join(error_msgs))
def default_get(self, cr, uid, fields_list, context=None):
"""
Returns default values for the fields in fields_list.
:param fields_list: list of fields to get the default values for (example ['field1', 'field2',])
:type fields_list: list
        :param context: optional context dictionary - it may contain keys for specifying certain options
like ``context_lang`` (language) or ``context_tz`` (timezone) to alter the results of the call.
It may contain keys in the form ``default_XXX`` (where XXX is a field name), to set
or override a default value for a field.
A special ``bin_size`` boolean flag may also be passed in the context to request the
value of all fields.binary columns to be returned as the size of the binary instead of its
                        contents. This can also be selectively overridden by passing a field-specific flag
in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
:return: dictionary of the default values (set on the object model class, through user preferences, or in the context)
"""
# trigger view init hook
self.view_init(cr, uid, fields_list, context)
if not context:
context = {}
defaults = {}
# get the default values for the inherited fields
for t in self._inherits.keys():
defaults.update(self.pool[t].default_get(cr, uid, fields_list, context))
# get the default values defined in the object
for f in fields_list:
if f in self._defaults:
if callable(self._defaults[f]):
defaults[f] = self._defaults[f](self, cr, uid, context)
else:
defaults[f] = self._defaults[f]
fld_def = ((f in self._columns) and self._columns[f]) \
or ((f in self._inherit_fields) and self._inherit_fields[f][2]) \
or False
if isinstance(fld_def, fields.property):
property_obj = self.pool.get('ir.property')
prop_value = property_obj.get(cr, uid, f, self._name, context=context)
if prop_value:
if isinstance(prop_value, (browse_record, browse_null)):
defaults[f] = prop_value.id
else:
defaults[f] = prop_value
else:
if f not in defaults:
defaults[f] = False
# get the default values set by the user and override the default
# values defined in the object
ir_values_obj = self.pool.get('ir.values')
res = ir_values_obj.get(cr, uid, 'default', False, [self._name])
for id, field, field_value in res:
if field in fields_list:
fld_def = (field in self._columns) and self._columns[field] or self._inherit_fields[field][2]
if fld_def._type == 'many2one':
obj = self.pool[fld_def._obj]
if not obj.search(cr, uid, [('id', '=', field_value or False)]):
continue
if fld_def._type == 'many2many':
obj = self.pool[fld_def._obj]
field_value2 = []
for i in range(len(field_value or [])):
if not obj.search(cr, uid, [('id', '=',
field_value[i])]):
continue
field_value2.append(field_value[i])
field_value = field_value2
if fld_def._type == 'one2many':
obj = self.pool[fld_def._obj]
field_value2 = []
for i in range(len(field_value or [])):
field_value2.append({})
for field2 in field_value[i]:
if field2 in obj._columns.keys() and obj._columns[field2]._type == 'many2one':
obj2 = self.pool[obj._columns[field2]._obj]
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
elif field2 in obj._inherit_fields.keys() and obj._inherit_fields[field2][2]._type == 'many2one':
obj2 = self.pool[obj._inherit_fields[field2][2]._obj]
if not obj2.search(cr, uid,
[('id', '=', field_value[i][field2])]):
continue
# TODO add test for many2many and one2many
field_value2[i][field2] = field_value[i][field2]
field_value = field_value2
defaults[field] = field_value
# get the default values from the context
for key in context or {}:
if key.startswith('default_') and (key[8:] in fields_list):
defaults[key[8:]] = context[key]
return defaults
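    # Illustrative call (field names and returned values are hypothetical):
    #   defaults = self.pool['res.partner'].default_get(cr, uid,
    #                                                   ['name', 'active', 'company_id'],
    #                                                   context={'default_name': 'Foo'})
    #   # -> e.g. {'name': 'Foo', 'active': True, 'company_id': 1}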
def fields_get_keys(self, cr, user, context=None):
res = self._columns.keys()
        # TODO I believe this loop can be replaced by
        # res.extend(self._inherit_fields.keys())
for parent in self._inherits:
res.extend(self.pool[parent].fields_get_keys(cr, user, context))
return res
def _rec_name_fallback(self, cr, uid, context=None):
rec_name = self._rec_name
if rec_name not in self._columns:
rec_name = self._columns.keys()[0] if len(self._columns.keys()) > 0 else "id"
return rec_name
#
# Overload this method if you need a window title which depends on the context
#
def view_header_get(self, cr, user, view_id=None, view_type='form', context=None):
return False
def user_has_groups(self, cr, uid, groups, context=None):
"""Return true if the user is at least member of one of the groups
in groups_str. Typically used to resolve ``groups`` attribute
in view and model definitions.
:param str groups: comma-separated list of fully-qualified group
external IDs, e.g.: ``base.group_user,base.group_system``
:return: True if the current user is a member of one of the
given groups
"""
return any([self.pool.get('res.users').has_group(cr, uid, group_ext_id)
for group_ext_id in groups.split(',')])
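    # Typical use (the group external ids are the examples from the docstring):
    #   if self.user_has_groups(cr, uid, 'base.group_user,base.group_system'):
    #       ...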
def _get_default_form_view(self, cr, user, context=None):
""" Generates a default single-line form view using all fields
of the current model except the m2m and o2m ones.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a form view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('form', string=self._description)
group = etree.SubElement(view, 'group', col="4")
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
for field, descriptor in self.fields_get(cr, user, context=context).iteritems():
if descriptor['type'] in ('one2many', 'many2many'):
continue
etree.SubElement(group, 'field', name=field)
if descriptor['type'] == 'text':
etree.SubElement(group, 'newline')
return view
def _get_default_search_view(self, cr, user, context=None):
""" Generates a single-field search view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('search', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_tree_view(self, cr, user, context=None):
""" Generates a single-field tree view, based on _rec_name.
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a tree view as an lxml document
:rtype: etree._Element
"""
view = etree.Element('tree', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
return view
def _get_default_calendar_view(self, cr, user, context=None):
""" Generates a default calendar view by trying to infer
calendar fields from a number of pre-set attribute names
:param cr: database cursor
:param int user: user id
:param dict context: connection context
:returns: a calendar view
:rtype: etree._Element
"""
def set_first_of(seq, in_, to):
"""Sets the first value of ``seq`` also found in ``in_`` to
the ``to`` attribute of the view being closed over.
Returns whether it's found a suitable value (and set it on
the attribute) or not
"""
for item in seq:
if item in in_:
view.set(to, item)
return True
return False
view = etree.Element('calendar', string=self._description)
etree.SubElement(view, 'field', name=self._rec_name_fallback(cr, user, context))
if self._date_name not in self._columns:
date_found = False
for dt in ['date', 'date_start', 'x_date', 'x_date_start']:
if dt in self._columns:
self._date_name = dt
date_found = True
break
if not date_found:
raise except_orm(_('Invalid Object Architecture!'), _("Insufficient fields for Calendar View!"))
view.set('date_start', self._date_name)
set_first_of(["user_id", "partner_id", "x_user_id", "x_partner_id"],
self._columns, 'color')
if not set_first_of(["date_stop", "date_end", "x_date_stop", "x_date_end"],
self._columns, 'date_stop'):
if not set_first_of(["date_delay", "planned_hours", "x_date_delay", "x_planned_hours"],
self._columns, 'date_delay'):
raise except_orm(
_('Invalid Object Architecture!'),
_("Insufficient fields to generate a Calendar View for %s, missing a date_stop or a date_delay" % self._name))
return view
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
"""
Get the detailed composition of the requested view like fields, model, view architecture
:param view_id: id of the view or None
:param view_type: type of the view to return if view_id is None ('form', tree', ...)
:param toolbar: true to include contextual actions
:param submenu: deprecated
:return: dictionary describing the composition of the requested view (including inherited views and extensions)
:raise AttributeError:
* if the inherited view has unknown position to work with other than 'before', 'after', 'inside', 'replace'
* if some tag other than 'position' is found in parent view
:raise Invalid ArchitectureError: if a view type other than form, tree, calendar, search, etc. is defined on the structure
"""
if context is None:
context = {}
View = self.pool['ir.ui.view']
result = {
'model': self._name,
'field_parent': False,
}
# try to find a view_id if none provided
if not view_id:
# <view_type>_view_ref in context can be used to override the default view
view_ref_key = view_type + '_view_ref'
view_ref = context.get(view_ref_key)
if view_ref:
if '.' in view_ref:
module, view_ref = view_ref.split('.', 1)
cr.execute("SELECT res_id FROM ir_model_data WHERE model='ir.ui.view' AND module=%s AND name=%s", (module, view_ref))
view_ref_res = cr.fetchone()
if view_ref_res:
view_id = view_ref_res[0]
else:
_logger.warning('%r requires a fully-qualified external id (got: %r for model %s). '
'Please use the complete `module.view_id` form instead.', view_ref_key, view_ref,
self._name)
if not view_id:
# otherwise try to find the lowest priority matching ir.ui.view
view_id = View.default_view(cr, uid, self._name, view_type, context=context)
# context for post-processing might be overridden
ctx = context
if view_id:
# read the view with inherited views applied
root_view = View.read_combined(cr, uid, view_id, fields=['id', 'name', 'field_parent', 'type', 'model', 'arch'], context=context)
result['arch'] = root_view['arch']
result['name'] = root_view['name']
result['type'] = root_view['type']
result['view_id'] = root_view['id']
result['field_parent'] = root_view['field_parent']
# override context for postprocessing
if root_view.get('model') != self._name:
ctx = dict(context, base_model_name=root_view.get('model'))
else:
# fallback on default views methods if no ir.ui.view could be found
try:
get_func = getattr(self, '_get_default_%s_view' % view_type)
arch_etree = get_func(cr, uid, context)
result['arch'] = etree.tostring(arch_etree, encoding='utf-8')
result['type'] = view_type
result['name'] = 'default'
except AttributeError:
raise except_orm(_('Invalid Architecture!'), _("No default view of type '%s' could be found !") % view_type)
# Apply post processing, groups and modifiers etc...
xarch, xfields = View.postprocess_and_fields(cr, uid, self._name, etree.fromstring(result['arch']), view_id, context=ctx)
result['arch'] = xarch
result['fields'] = xfields
# Add related action information if asked
if toolbar:
toclean = ('report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml', 'report_sxw_content_data', 'report_rml_content_data')
def clean(x):
x = x[2]
for key in toclean:
x.pop(key, None)
return x
ir_values_obj = self.pool.get('ir.values')
resprint = ir_values_obj.get(cr, uid, 'action', 'client_print_multi', [(self._name, False)], False, context)
resaction = ir_values_obj.get(cr, uid, 'action', 'client_action_multi', [(self._name, False)], False, context)
resrelate = ir_values_obj.get(cr, uid, 'action', 'client_action_relate', [(self._name, False)], False, context)
resaction = [clean(action) for action in resaction if view_type == 'tree' or not action[2].get('multi')]
resprint = [clean(print_) for print_ in resprint if view_type == 'tree' or not print_[2].get('multi')]
# When multi="True" is set, the action is displayed only in the "More" menu of the list view
resrelate = [clean(action) for action in resrelate
if (action[2].get('multi') and view_type == 'tree') or (not action[2].get('multi') and view_type == 'form')]
for x in itertools.chain(resprint, resaction, resrelate):
x['string'] = x['name']
result['toolbar'] = {
'print': resprint,
'action': resaction,
'relate': resrelate
}
return result
def get_formview_id(self, cr, uid, id, context=None):
""" Return an view id to open the document with. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
return False
def get_formview_action(self, cr, uid, id, context=None):
""" Return an action to open the document. This method is meant to be
overridden in addons that want to give specific view ids for example.
:param int id: id of the document to open
"""
view_id = self.get_formview_id(cr, uid, id, context=context)
return {
'type': 'ir.actions.act_window',
'res_model': self._name,
'view_type': 'form',
'view_mode': 'form',
'views': [(view_id, 'form')],
'target': 'current',
'res_id': id,
}
def _view_look_dom_arch(self, cr, uid, node, view_id, context=None):
return self.pool['ir.ui.view'].postprocess_and_fields(
cr, uid, self._name, node, view_id, context=context)
def search_count(self, cr, user, args, context=None):
res = self.search(cr, user, args, context=context, count=True)
if isinstance(res, list):
return len(res)
return res
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
"""
Search for records based on a search domain.
:param cr: database cursor
:param user: current user id
:param args: list of tuples specifying the search domain [('field_name', 'operator', value), ...]. Pass an empty list to match all records.
:param offset: optional number of results to skip in the returned values (default: 0)
:param limit: optional max number of records to return (default: **None**)
:param order: optional columns to sort by (default: self._order=id )
:param context: optional context arguments, like lang, time zone
:type context: dictionary
:param count: optional (default: **False**), if **True**, returns only the number of records matching the criteria, not their ids
:return: id or list of ids of records matching the criteria
:rtype: integer or list of integers
:raise AccessError: * if user tries to bypass access rules for read on the requested object.
**Expressing a search domain (args)**
Each tuple in the search domain needs to have 3 elements, in the form: **('field_name', 'operator', value)**, where:
* **field_name** must be a valid name of a field of the object model, possibly following many-to-one relationships using dot-notation, e.g. 'street' or 'partner_id.country' are valid values.
* **operator** must be a string with a valid comparison operator from this list: ``=, !=, >, >=, <, <=, like, ilike, in, not in, child_of, parent_left, parent_right``
The semantics of most of these operators are obvious.
The ``child_of`` operator will look for records who are children or grand-children of a given record,
according to the semantics of this model (i.e following the relationship field named by
``self._parent_name``, by default ``parent_id``.
* **value** must be a valid value to compare with the values of **field_name**, depending on its type.
Domain criteria can be combined using 3 logical operators that can be added between tuples: '**&**' (logical AND, default), '**|**' (logical OR), '**!**' (logical NOT).
These are **prefix** operators and the arity of the '**&**' and '**|**' operator is 2, while the arity of the '**!**' is just 1.
Be very careful about this when you combine them the first time.
Here is an example of searching for Partners named *ABC* from Belgium and Germany whose language is not english ::
[('name','=','ABC'),'!',('language.code','=','en_US'),'|',('country_id.code','=','be'),('country_id.code','=','de')]
The '&' is omitted as it is the default, and of course we could have used '!=' for the language, but what this domain really represents is::
(name is 'ABC' AND (language is NOT english) AND (country is Belgium OR Germany))
"""
return self._search(cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count)
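# Illustrative usage (hypothetical model and data, not part of the original
# source), matching the docstring example above -- "name is ABC AND
# (country is Belgium OR Germany)":
#
#     domain = [('name', '=', 'ABC'),
#               '|', ('country_id.code', '=', 'be'), ('country_id.code', '=', 'de')]
#     ids = model.search(cr, uid, domain, limit=80, order='name')
#     total = model.search(cr, uid, domain, count=True)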
def name_get(self, cr, user, ids, context=None):
"""Returns the preferred display value (text representation) for the records with the
given ``ids``. By default this will be the value of the ``name`` column, unless
the model implements a custom behavior.
Can sometimes be seen as the inverse function of :meth:`~.name_search`, but it is not
guaranteed to be.
:rtype: list(tuple)
:return: list of pairs ``(id,text_repr)`` for all records with the given ``ids``.
"""
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
if self._rec_name in self._all_columns:
rec_name_column = self._all_columns[self._rec_name].column
return [(r['id'], rec_name_column.as_display_name(cr, user, self, r[self._rec_name], context=context))
for r in self.read(cr, user, ids, [self._rec_name],
load='_classic_write', context=context)]
return [(id, "%s,%s" % (self._name, id)) for id in ids]
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
"""Search for records that have a display name matching the given ``name`` pattern if compared
with the given ``operator``, while also matching the optional search domain (``args``).
This is used for example to provide suggestions based on a partial value for a relational
field.
This can sometimes be seen as the inverse function of :meth:`~.name_get`, but it is not
guaranteed to be.
This method is equivalent to calling :meth:`~.search` with a search domain based on ``name``
and then :meth:`~.name_get` on the result of the search.
:param list args: optional search domain (see :meth:`~.search` for syntax),
specifying further restrictions
:param str operator: domain operator for matching the ``name`` pattern, such as ``'like'``
or ``'='``.
:param int limit: optional max number of records to return
:rtype: list
:return: list of pairs ``(id,text_repr)`` for all matching records.
"""
return self._name_search(cr, user, name, args, operator, context, limit)
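# Illustrative usage (assumption, not part of the original source): a typical
# autocompletion call for a relational field, with 'active' as a hypothetical
# extra filter:
#
#     pairs = model.name_search(cr, uid, 'ABC', args=[('active', '=', True)],
#                               operator='ilike', limit=10)
#     # -> list of (id, display_name) pairs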
def name_create(self, cr, uid, name, context=None):
"""Creates a new record by calling :meth:`~.create` with only one
value provided: the name of the new record (``_rec_name`` field).
The new record will also be initialized with any default values applicable
to this model, or provided through the context. The usual behavior of
:meth:`~.create` applies.
Similarly, this method may raise an exception if the model has multiple
required fields and some do not have default values.
:param name: name of the record to create
:rtype: tuple
:return: the :meth:`~.name_get` pair value for the newly-created record.
"""
rec_id = self.create(cr, uid, {self._rec_name: name}, context)
return self.name_get(cr, uid, [rec_id], context)[0]
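# Illustrative usage (assumption, not part of the original source): quick
# record creation from a single string, as done by "create and edit" widgets:
#
#     new_id, display_name = model.name_create(cr, uid, 'New record')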
# private implementation of name_search, allows passing a dedicated user for the name_get part to
# solve some access rights issues
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
if args is None:
args = []
if context is None:
context = {}
args = args[:]
# optimize out the default criterion of ``ilike ''`` that matches everything
if not (name == '' and operator == 'ilike'):
args += [(self._rec_name, operator, name)]
access_rights_uid = name_get_uid or user
ids = self._search(cr, user, args, limit=limit, context=context, access_rights_uid=access_rights_uid)
res = self.name_get(cr, access_rights_uid, ids, context)
return res
def read_string(self, cr, uid, id, langs, fields=None, context=None):
res = {}
res2 = {}
self.pool.get('ir.translation').check_access_rights(cr, uid, 'read')
if not fields:
fields = self._columns.keys() + self._inherit_fields.keys()
#FIXME: collect all calls to _get_source into one SQL call.
for lang in langs:
res[lang] = {'code': lang}
for f in fields:
if f in self._columns:
res_trans = self.pool.get('ir.translation')._get_source(cr, uid, self._name+','+f, 'field', lang)
if res_trans:
res[lang][f] = res_trans
else:
res[lang][f] = self._columns[f].string
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), fields)
res2 = self.pool[table].read_string(cr, uid, id, langs, cols, context)
for lang in res2:
if lang in res:
res[lang]['code'] = lang
for f in res2[lang]:
res[lang][f] = res2[lang][f]
return res
def write_string(self, cr, uid, id, langs, vals, context=None):
self.pool.get('ir.translation').check_access_rights(cr, uid, 'write')
#FIXME: try to only call the translation in one SQL query
for lang in langs:
for field in vals:
if field in self._columns:
src = self._columns[field].string
self.pool.get('ir.translation')._set_ids(cr, uid, self._name+','+field, 'field', lang, [0], vals[field], src)
for table in self._inherits:
cols = intersect(self._inherit_fields.keys(), vals)
if cols:
self.pool[table].write_string(cr, uid, id, langs, vals, context)
return True
def _add_missing_default_values(self, cr, uid, values, context=None):
missing_defaults = []
avoid_tables = [] # avoid overriding inherited values when parent is set
for tables, parent_field in self._inherits.items():
if parent_field in values:
avoid_tables.append(tables)
for field in self._columns.keys():
if not field in values:
missing_defaults.append(field)
for field in self._inherit_fields.keys():
if (field not in values) and (self._inherit_fields[field][0] not in avoid_tables):
missing_defaults.append(field)
if len(missing_defaults):
# override defaults with the provided values, never allow the other way around
defaults = self.default_get(cr, uid, missing_defaults, context)
for dv in defaults:
if ((dv in self._columns and self._columns[dv]._type == 'many2many') \
or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'many2many')) \
and defaults[dv] and isinstance(defaults[dv][0], (int, long)):
defaults[dv] = [(6, 0, defaults[dv])]
if (dv in self._columns and self._columns[dv]._type == 'one2many' \
or (dv in self._inherit_fields and self._inherit_fields[dv][2]._type == 'one2many')) \
and isinstance(defaults[dv], (list, tuple)) and defaults[dv] and isinstance(defaults[dv][0], dict):
defaults[dv] = [(0, 0, x) for x in defaults[dv]]
defaults.update(values)
values = defaults
return values
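# Note (added for clarity, not part of the original source): the conversions
# above rewrite plain defaults into the x2many "command" format expected by
# create/write, e.g. a many2many default of [1, 2] becomes [(6, 0, [1, 2])]
# and a one2many default of [{'name': 'x'}] becomes [(0, 0, {'name': 'x'})].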
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi``.
"""
try:
getattr(self, '_ormcache')
self._ormcache = {}
self.pool._any_cache_cleared = True
except AttributeError:
pass
def _read_group_fill_results(self, cr, uid, domain, groupby, remaining_groupbys, aggregated_fields,
read_group_result, read_group_order=None, context=None):
"""Helper method for filling in empty groups for all possible values of
the field being grouped by"""
# self._group_by_full should map groupable fields to a method that returns
# a list of all aggregated values that we want to display for this field,
# in the form of a m2o-like pair (key,label).
# This is useful to implement kanban views for instance, where all columns
# should be displayed even if they don't contain any record.
# Grab the list of all groups that should be displayed, including all present groups
present_group_ids = [x[groupby][0] for x in read_group_result if x[groupby]]
all_groups,folded = self._group_by_full[groupby](self, cr, uid, present_group_ids, domain,
read_group_order=read_group_order,
access_rights_uid=openerp.SUPERUSER_ID,
context=context)
result_template = dict.fromkeys(aggregated_fields, False)
result_template[groupby + '_count'] = 0
if remaining_groupbys:
result_template['__context'] = {'group_by': remaining_groupbys}
# Merge the left_side (current results as dicts) with the right_side (all
# possible values as m2o pairs). Both lists are supposed to be using the
# same ordering, and can be merged in one pass.
result = []
known_values = {}
def append_left(left_side):
grouped_value = left_side[groupby] and left_side[groupby][0]
if not grouped_value in known_values:
result.append(left_side)
known_values[grouped_value] = left_side
else:
count_attr = groupby + '_count'
known_values[grouped_value].update({count_attr: left_side[count_attr]})
def append_right(right_side):
grouped_value = right_side[0]
if not grouped_value in known_values:
line = dict(result_template)
line[groupby] = right_side
line['__domain'] = [(groupby,'=',grouped_value)] + domain
result.append(line)
known_values[grouped_value] = line
while read_group_result or all_groups:
left_side = read_group_result[0] if read_group_result else None
right_side = all_groups[0] if all_groups else None
assert left_side is None or left_side[groupby] is False \
or isinstance(left_side[groupby], (tuple,list)), \
'M2O-like pair expected, got %r' % left_side[groupby]
assert right_side is None or isinstance(right_side, (tuple,list)), \
'M2O-like pair expected, got %r' % right_side
if left_side is None:
append_right(all_groups.pop(0))
elif right_side is None:
append_left(read_group_result.pop(0))
elif left_side[groupby] == right_side:
append_left(read_group_result.pop(0))
all_groups.pop(0) # discard right_side
elif not left_side[groupby] or not left_side[groupby][0]:
# left side == "Undefined" entry, not present on right_side
append_left(read_group_result.pop(0))
else:
append_right(all_groups.pop(0))
if folded:
for r in result:
r['__fold'] = folded.get(r[groupby] and r[groupby][0], False)
return result
def _read_group_prepare(self, orderby, aggregated_fields, annotated_groupbys, query):
"""
Prepares the GROUP BY and ORDER BY terms for the read_group method. Adds the missing JOIN clause
to the query if order should be computed against m2o field.
:param orderby: the orderby definition in the form "%(field)s %(order)s"
:param aggregated_fields: list of aggregated fields in the query
:param annotated_groupbys: list of dictionaries returned by _read_group_process_groupby
These dictionaries contain the qualified name of each groupby
(fully qualified SQL name for the corresponding field),
and the (non raw) field name.
:param osv.Query query: the query under construction
:return: (groupby_terms, orderby_terms)
"""
orderby_terms = []
groupby_terms = [gb['qualified_field'] for gb in annotated_groupbys]
groupby_fields = [gb['groupby'] for gb in annotated_groupbys]
if not orderby:
return groupby_terms, orderby_terms
self._check_qorder(orderby)
for order_part in orderby.split(','):
order_split = order_part.split()
order_field = order_split[0]
if order_field in groupby_fields:
if self._all_columns[order_field.split(':')[0]].column._type == 'many2one':
order_clause = self._generate_order_by(order_part, query).replace('ORDER BY ', '')
if order_clause:
orderby_terms.append(order_clause)
groupby_terms += [order_term.split()[0] for order_term in order_clause.split(',')]
else:
order = '"%s" %s' % (order_field, '' if len(order_split) == 1 else order_split[1])
orderby_terms.append(order)
elif order_field in aggregated_fields:
orderby_terms.append(order_part)
else:
# Cannot order by a field that will not appear in the results (needs to be grouped or aggregated)
_logger.warn('%s: read_group order by `%s` ignored, cannot sort on empty columns (not grouped/aggregated)',
self._name, order_part)
return groupby_terms, orderby_terms
def _read_group_process_groupby(self, gb, query, context):
"""
Helper method to collect important information about groupbys: raw
field name, type, time information, qualified name, ...
"""
split = gb.split(':')
field_type = self._all_columns[split[0]].column._type
gb_function = split[1] if len(split) == 2 else None
temporal = field_type in ('date', 'datetime')
tz_convert = field_type == 'datetime' and context.get('tz') in pytz.all_timezones
qualified_field = self._inherits_join_calc(split[0], query)
if temporal:
display_formats = {
'day': 'dd MMM YYYY',
'week': "'W'w YYYY",
'month': 'MMMM YYYY',
'quarter': 'QQQ YYYY',
'year': 'YYYY'
}
time_intervals = {
'day': dateutil.relativedelta.relativedelta(days=1),
'week': datetime.timedelta(days=7),
'month': dateutil.relativedelta.relativedelta(months=1),
'quarter': dateutil.relativedelta.relativedelta(months=3),
'year': dateutil.relativedelta.relativedelta(years=1)
}
if tz_convert:
qualified_field = "timezone('%s', timezone('UTC',%s))" % (context.get('tz', 'UTC'), qualified_field)
qualified_field = "date_trunc('%s', %s)" % (gb_function or 'month', qualified_field)
if field_type == 'boolean':
qualified_field = "coalesce(%s,false)" % qualified_field
return {
'field': split[0],
'groupby': gb,
'type': field_type,
'display_format': display_formats[gb_function or 'month'] if temporal else None,
'interval': time_intervals[gb_function or 'month'] if temporal else None,
'tz_convert': tz_convert,
'qualified_field': qualified_field
}
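# Illustrative sketch (assumption, not part of the original source): a groupby
# spec such as 'date_field:week' on a hypothetical datetime column yields
# roughly
#     {'field': 'date_field', 'groupby': 'date_field:week', 'type': 'datetime',
#      'display_format': "'W'w YYYY", 'interval': datetime.timedelta(days=7),
#      'qualified_field': "date_trunc('week', ...)", ...}
# which read_group then uses to build its SELECT and GROUP BY clauses.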
def _read_group_prepare_data(self, key, value, groupby_dict, context):
"""
Helper method to sanitize the data received by read_group. The None
values are converted to False, and the date/datetime values are parsed
and corrected according to the timezones.
"""
value = False if value is None else value
gb = groupby_dict.get(key)
if gb and gb['type'] in ('date', 'datetime') and value:
if isinstance(value, basestring):
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if gb['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
value = datetime.datetime.strptime(value, dt_format)
if gb['tz_convert']:
value = pytz.timezone(context['tz']).localize(value)
return value
def _read_group_get_domain(self, groupby, value):
"""
Helper method to construct the domain corresponding to a groupby and
a given value. This is mostly relevant for date/datetime.
"""
if groupby['type'] in ('date', 'datetime') and value:
dt_format = DEFAULT_SERVER_DATETIME_FORMAT if groupby['type'] == 'datetime' else DEFAULT_SERVER_DATE_FORMAT
domain_dt_begin = value
domain_dt_end = value + groupby['interval']
if groupby['tz_convert']:
domain_dt_begin = domain_dt_begin.astimezone(pytz.utc)
domain_dt_end = domain_dt_end.astimezone(pytz.utc)
return [(groupby['field'], '>=', domain_dt_begin.strftime(dt_format)),
(groupby['field'], '<', domain_dt_end.strftime(dt_format))]
if groupby['type'] == 'many2one' and value:
value = value[0]
return [(groupby['field'], '=', value)]
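# Illustrative sketch (assumption, not part of the original source): for a
# 'month' groupby on a date field whose group value is datetime(2014, 1, 1),
# the returned domain is the half-open interval covering that group:
#     [('date_field', '>=', '2014-01-01'), ('date_field', '<', '2014-02-01')]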
def _read_group_format_result(self, data, annotated_groupbys, groupby, groupby_dict, domain, context):
"""
Helper method to format the data contained in the dictionary data by
adding the domain corresponding to its values, the groupbys in the
context and by properly formatting the date/datetime values.
"""
domain_group = [dom for gb in annotated_groupbys for dom in self._read_group_get_domain(gb, data[gb['groupby']])]
for k,v in data.iteritems():
gb = groupby_dict.get(k)
if gb and gb['type'] in ('date', 'datetime') and v:
data[k] = babel.dates.format_date(v, format=gb['display_format'], locale=context.get('lang', 'en_US'))
data['__domain'] = domain_group + domain
if len(groupby) - len(annotated_groupbys) >= 1:
data['__context'] = { 'group_by': groupby[len(annotated_groupbys):]}
del data['id']
return data
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
"""
Get the list of records in list view grouped by the given ``groupby`` fields
:param cr: database cursor
:param uid: current user id
:param domain: list specifying search criteria [['field_name', 'operator', 'value'], ...]
:param list fields: list of fields present in the list view specified on the object
:param list groupby: list of groupby descriptions by which the records will be grouped.
A groupby description is either a field (then it will be grouped by that field)
or a string 'field:groupby_function'. Right now, the only functions supported
are 'day', 'week', 'month', 'quarter' or 'year', and they only make sense for
date/datetime fields.
:param int offset: optional number of records to skip
:param int limit: optional max number of records to return
:param dict context: context arguments, like lang, time zone.
:param list orderby: optional ``order by`` specification, for
overriding the natural sort ordering of the
groups, see also :py:meth:`~osv.osv.osv.search`
(supported only for many2one fields currently)
:param bool lazy: if true, the results are only grouped by the first groupby and the
remaining groupbys are put in the __context key. If false, all the groupbys are
done in one call.
:return: list of dictionaries(one dictionary for each record) containing:
* the values of fields grouped by the fields in ``groupby`` argument
* __domain: list of tuples specifying the search criteria
* __context: dictionary with argument like ``groupby``
:rtype: [{'field_name_1': value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
if context is None:
context = {}
self.check_access_rights(cr, uid, 'read')
query = self._where_calc(cr, uid, domain, context=context)
fields = fields or self._columns.keys()
groupby = [groupby] if isinstance(groupby, basestring) else groupby
groupby_list = groupby[:1] if lazy else groupby
annotated_groupbys = [self._read_group_process_groupby(gb, query, context)
for gb in groupby_list]
groupby_fields = [g['field'] for g in annotated_groupbys]
order = orderby or ','.join([g for g in groupby_list])
groupby_dict = {gb['groupby']: gb for gb in annotated_groupbys}
self._apply_ir_rules(cr, uid, query, 'read', context=context)
for gb in groupby_fields:
assert gb in fields, "Fields in 'groupby' must appear in the list of fields to read (perhaps it's missing in the list view?)"
groupby_def = self._columns.get(gb) or (self._inherit_fields.get(gb) and self._inherit_fields.get(gb)[2])
assert groupby_def and groupby_def._classic_write, "Fields in 'groupby' must be regular database-persisted fields (no function or related fields), or function fields with store=True"
if not (gb in self._all_columns):
# Don't allow arbitrary values, as this would be a SQL injection vector!
raise except_orm(_('Invalid group_by'),
_('Invalid group_by specification: "%s".\nA group_by specification must be a list of valid fields.')%(gb,))
aggregated_fields = [
f for f in fields
if f not in ('id', 'sequence')
if f not in groupby_fields
if self._all_columns[f].column._type in ('integer', 'float')
if getattr(self._all_columns[f].column, '_classic_write')]
field_formatter = lambda f: (self._all_columns[f].column.group_operator or 'sum', self._inherits_join_calc(f, query), f)
select_terms = ["%s(%s) AS %s" % field_formatter(f) for f in aggregated_fields]
for gb in annotated_groupbys:
select_terms.append('%s as "%s" ' % (gb['qualified_field'], gb['groupby']))
groupby_terms, orderby_terms = self._read_group_prepare(order, aggregated_fields, annotated_groupbys, query)
from_clause, where_clause, where_clause_params = query.get_sql()
if lazy and (len(groupby_fields) >= 2 or not context.get('group_by_no_leaf')):
count_field = groupby_fields[0] if len(groupby_fields) >= 1 else '_'
else:
count_field = '_'
prefix_terms = lambda prefix, terms: (prefix + " " + ",".join(terms)) if terms else ''
prefix_term = lambda prefix, term: ('%s %s' % (prefix, term)) if term else ''
query = """
SELECT min(%(table)s.id) AS id, count(%(table)s.id) AS %(count_field)s_count %(extra_fields)s
FROM %(from)s
%(where)s
%(groupby)s
%(orderby)s
%(limit)s
%(offset)s
""" % {
'table': self._table,
'count_field': count_field,
'extra_fields': prefix_terms(',', select_terms),
'from': from_clause,
'where': prefix_term('WHERE', where_clause),
'groupby': prefix_terms('GROUP BY', groupby_terms),
'orderby': prefix_terms('ORDER BY', orderby_terms),
'limit': prefix_term('LIMIT', int(limit) if limit else None),
'offset': prefix_term('OFFSET', int(offset) if offset else None),
}
cr.execute(query, where_clause_params)
fetched_data = cr.dictfetchall()
if not groupby_fields:
return fetched_data
many2onefields = [gb['field'] for gb in annotated_groupbys if gb['type'] == 'many2one']
if many2onefields:
data_ids = [r['id'] for r in fetched_data]
many2onefields = list(set(many2onefields))
data_dict = {d['id']: d for d in self.read(cr, uid, data_ids, many2onefields, context=context)}
for d in fetched_data:
d.update(data_dict[d['id']])
data = map(lambda r: {k: self._read_group_prepare_data(k,v, groupby_dict, context) for k,v in r.iteritems()}, fetched_data)
result = [self._read_group_format_result(d, annotated_groupbys, groupby, groupby_dict, domain, context) for d in data]
if lazy and groupby_fields[0] in self._group_by_full:
# Right now, read_group only fills results in lazy mode (by default).
# If you need to have the empty groups in 'eager' mode, then the
# method _read_group_fill_results needs to be completely reimplemented
# in a sane way
result = self._read_group_fill_results(cr, uid, domain, groupby_fields[0], groupby[len(annotated_groupbys):],
aggregated_fields, result, read_group_order=order,
context=context)
return result
def _inherits_join_add(self, current_model, parent_model_name, query):
"""
Add missing table SELECT and JOIN clause to ``query`` for reaching the parent table (no duplicates)
:param current_model: current model object
:param parent_model_name: name of the parent model for which the clauses should be added
:param query: query object on which the JOIN should be added
"""
inherits_field = current_model._inherits[parent_model_name]
parent_model = self.pool[parent_model_name]
parent_alias, parent_alias_statement = query.add_join((current_model._table, parent_model._table, inherits_field, 'id', inherits_field), implicit=True)
return parent_alias
def _inherits_join_calc(self, field, query):
"""
Adds missing table select and join clause(s) to ``query`` for reaching
the field coming from an '_inherits' parent table (no duplicates).
:param field: name of inherited field to reach
:param query: query object on which the JOIN should be added
:return: qualified name of field, to be used in SELECT clause
"""
current_table = self
parent_alias = '"%s"' % current_table._table
while field in current_table._inherit_fields and not field in current_table._columns:
parent_model_name = current_table._inherit_fields[field][0]
parent_table = self.pool[parent_model_name]
parent_alias = self._inherits_join_add(current_table, parent_model_name, query)
current_table = parent_table
return '%s."%s"' % (parent_alias, field)
def _parent_store_compute(self, cr):
if not self._parent_store:
return
_logger.info('Computing parent left and right for table %s...', self._table)
def browse_rec(root, pos=0):
# TODO: set order
where = self._parent_name+'='+str(root)
if not root:
where = self._parent_name+' IS NULL'
if self._parent_order:
where += ' order by '+self._parent_order
cr.execute('SELECT id FROM '+self._table+' WHERE '+where)
pos2 = pos + 1
for id in cr.fetchall():
pos2 = browse_rec(id[0], pos2)
cr.execute('update '+self._table+' set parent_left=%s, parent_right=%s where id=%s', (pos, pos2, root))
return pos2 + 1
query = 'SELECT id FROM '+self._table+' WHERE '+self._parent_name+' IS NULL'
if self._parent_order:
query += ' order by ' + self._parent_order
pos = 0
cr.execute(query)
for (root,) in cr.fetchall():
pos = browse_rec(root, pos)
return True
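# Note (added for clarity, not part of the original source): this recomputes
# the nested-set encoding used for fast ``child_of`` searches: each record
# gets a (parent_left, parent_right) pair such that a child's interval is
# strictly contained in its parent's interval.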
def _update_store(self, cr, f, k):
_logger.info("storing computed values of fields.function '%s'", k)
ss = self._columns[k]._symbol_set
update_query = 'UPDATE "%s" SET "%s"=%s WHERE id=%%s' % (self._table, k, ss[0])
cr.execute('select id from '+self._table)
ids_lst = map(lambda x: x[0], cr.fetchall())
while ids_lst:
iids = ids_lst[:AUTOINIT_RECALCULATE_STORED_FIELDS]
ids_lst = ids_lst[AUTOINIT_RECALCULATE_STORED_FIELDS:]
res = f.get(cr, self, iids, k, SUPERUSER_ID, {})
for key, val in res.items():
if f._multi:
val = val[k]
# if val is a many2one, just write the ID
if type(val) == tuple:
val = val[0]
if val is not False:
cr.execute(update_query, (ss[1](val), key))
def _check_selection_field_value(self, cr, uid, field, value, context=None):
"""Raise except_orm if value is not among the valid values for the selection field"""
if self._columns[field]._type == 'reference':
val_model, val_id_str = value.split(',', 1)
val_id = False
try:
val_id = long(val_id_str)
except ValueError:
pass
if not val_id:
raise except_orm(_('ValidateError'),
_('Invalid value for reference field "%s.%s" (last part must be a non-zero integer): "%s"') % (self._table, field, value))
val = val_model
else:
val = value
if isinstance(self._columns[field].selection, (tuple, list)):
if val in dict(self._columns[field].selection):
return
elif val in dict(self._columns[field].selection(self, cr, uid, context=context)):
return
raise except_orm(_('ValidateError'),
_('The value "%s" for the field "%s.%s" is not in the selection') % (value, self._table, field))
def _check_removed_columns(self, cr, log=False):
# iterate on the database columns to drop the NOT NULL constraints
# of fields which were required but have been removed (or will be added by another module)
columns = [c for c in self._columns if not (isinstance(self._columns[c], fields.function) and not self._columns[c].store)]
columns += MAGIC_COLUMNS
cr.execute("SELECT a.attname, a.attnotnull"
" FROM pg_class c, pg_attribute a"
" WHERE c.relname=%s"
" AND c.oid=a.attrelid"
" AND a.attisdropped=%s"
" AND pg_catalog.format_type(a.atttypid, a.atttypmod) NOT IN ('cid', 'tid', 'oid', 'xid')"
" AND a.attname NOT IN %s", (self._table, False, tuple(columns))),
for column in cr.dictfetchall():
if log:
_logger.debug("column %s is in the table %s but not in the corresponding object %s",
column['attname'], self._table, self._name)
if column['attnotnull']:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, column['attname']))
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, column['attname'])
def _save_constraint(self, cr, constraint_name, type):
"""
Record the creation of a constraint for this model, to make it possible
to delete it later when the module is uninstalled. Type can be either
'f' or 'u' depending on the constraint being a foreign key or not.
"""
if not self._module:
# no need to save constraints for custom models as they're not part
# of any module
return
assert type in ('f', 'u')
cr.execute("""
SELECT 1 FROM ir_model_constraint, ir_module_module
WHERE ir_model_constraint.module=ir_module_module.id
AND ir_model_constraint.name=%s
AND ir_module_module.name=%s
""", (constraint_name, self._module))
if not cr.rowcount:
cr.execute("""
INSERT INTO ir_model_constraint
(name, date_init, date_update, module, model, type)
VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
(SELECT id FROM ir_module_module WHERE name=%s),
(SELECT id FROM ir_model WHERE model=%s), %s)""",
(constraint_name, self._module, self._name, type))
def _save_relation_table(self, cr, relation_table):
"""
Record the creation of a many2many for this model, to make it possible
to delete it later when the module is uninstalled.
"""
cr.execute("""
SELECT 1 FROM ir_model_relation, ir_module_module
WHERE ir_model_relation.module=ir_module_module.id
AND ir_model_relation.name=%s
AND ir_module_module.name=%s
""", (relation_table, self._module))
if not cr.rowcount:
cr.execute("""INSERT INTO ir_model_relation (name, date_init, date_update, module, model)
VALUES (%s, now() AT TIME ZONE 'UTC', now() AT TIME ZONE 'UTC',
(SELECT id FROM ir_module_module WHERE name=%s),
(SELECT id FROM ir_model WHERE model=%s))""",
(relation_table, self._module, self._name))
# checked version: for direct m2o starting from `self`
def _m2o_add_foreign_key_checked(self, source_field, dest_model, ondelete):
assert self.is_transient() or not dest_model.is_transient(), \
'Many2One relationships from non-transient Model to TransientModel are forbidden'
if self.is_transient() and not dest_model.is_transient():
# TransientModel relationships to regular Models are annoying
# usually because they could block deletion due to the FKs.
# So unless stated otherwise we default them to ondelete=cascade.
ondelete = ondelete or 'cascade'
fk_def = (self._table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
# unchecked version: for custom cases, such as m2m relationships
def _m2o_add_foreign_key_unchecked(self, source_table, source_field, dest_model, ondelete):
fk_def = (source_table, source_field, dest_model._table, ondelete or 'set null')
self._foreign_keys.add(fk_def)
_schema.debug("Table '%s': added foreign key '%s' with definition=REFERENCES \"%s\" ON DELETE %s", *fk_def)
def _drop_constraint(self, cr, source_table, constraint_name):
cr.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (source_table,constraint_name))
def _m2o_fix_foreign_key(self, cr, source_table, source_field, dest_model, ondelete):
# Find FK constraint(s) currently established for the m2o field,
# and see whether they are stale or not
cr.execute("""SELECT confdeltype as ondelete_rule, conname as constraint_name,
cl2.relname as foreign_table
FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
pg_attribute as att1, pg_attribute as att2
WHERE con.conrelid = cl1.oid
AND cl1.relname = %s
AND con.confrelid = cl2.oid
AND array_lower(con.conkey, 1) = 1
AND con.conkey[1] = att1.attnum
AND att1.attrelid = cl1.oid
AND att1.attname = %s
AND array_lower(con.confkey, 1) = 1
AND con.confkey[1] = att2.attnum
AND att2.attrelid = cl2.oid
AND att2.attname = %s
AND con.contype = 'f'""", (source_table, source_field, 'id'))
constraints = cr.dictfetchall()
if constraints:
if len(constraints) == 1:
# Is it the right constraint?
cons, = constraints
if cons['ondelete_rule'] != POSTGRES_CONFDELTYPES.get((ondelete or 'set null').upper(), 'a')\
or cons['foreign_table'] != dest_model._table:
# Wrong FK: drop it and recreate
_schema.debug("Table '%s': dropping obsolete FK constraint: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
else:
# it's all good, nothing to do!
return
else:
# Multiple FKs found for the same field, drop them all, and re-create
for cons in constraints:
_schema.debug("Table '%s': dropping duplicate FK constraints: '%s'",
source_table, cons['constraint_name'])
self._drop_constraint(cr, source_table, cons['constraint_name'])
# (re-)create the FK
self._m2o_add_foreign_key_checked(source_field, dest_model, ondelete)
def _auto_init(self, cr, context=None):
"""
Call _field_create and, unless _auto is False:
- create the corresponding table in database for the model,
- possibly add the parent columns in database,
- possibly add the columns 'create_uid', 'create_date', 'write_uid',
'write_date' in database if _log_access is True (the default),
- report on database columns that no longer exist in _columns,
- drop NOT NULL constraints that no longer apply,
- alter existing database columns to match _columns,
- create database tables to match _columns,
- add database indices to match _columns,
- save in self._foreign_keys a list of foreign keys to create (see
_auto_end).
"""
self._foreign_keys = set()
raise_on_invalid_object_name(self._name)
if context is None:
context = {}
store_compute = False
todo_end = []
update_custom_fields = context.get('update_custom_fields', False)
self._field_create(cr, context=context)
create = not self._table_exist(cr)
if self._auto:
if create:
self._create_table(cr)
cr.commit()
if self._parent_store:
if not self._parent_columns_exist(cr):
self._create_parent_columns(cr)
store_compute = True
# Create the create_uid, create_date, write_uid, write_date, columns if desired.
if self._log_access:
self._add_log_columns(cr)
self._check_removed_columns(cr, log=False)
# iterate on the "object columns"
column_data = self._select_column_data(cr)
for k, f in self._columns.iteritems():
if k in MAGIC_COLUMNS:
continue
# Don't update custom (also called manual) fields
if f.manual and not update_custom_fields:
continue
if isinstance(f, fields.one2many):
self._o2m_raise_on_missing_reference(cr, f)
elif isinstance(f, fields.many2many):
self._m2m_raise_or_create_relation(cr, f)
else:
res = column_data.get(k)
# The field is not found as-is in database, try if it
# exists with an old name.
if not res and hasattr(f, 'oldname'):
res = column_data.get(f.oldname)
if res:
cr.execute('ALTER TABLE "%s" RENAME "%s" TO "%s"' % (self._table, f.oldname, k))
res['attname'] = k
column_data[k] = res
_schema.debug("Table '%s': renamed column '%s' to '%s'",
self._table, f.oldname, k)
# The field already exists in database. Possibly
# change its type, rename it, drop it or change its
# constraints.
if res:
f_pg_type = res['typname']
f_pg_size = res['size']
f_pg_notnull = res['attnotnull']
if isinstance(f, fields.function) and not f.store and\
not getattr(f, 'nodrop', False):
_logger.info('column %s (%s) converted to a function, removed from table %s',
k, f.string, self._table)
cr.execute('ALTER TABLE "%s" DROP COLUMN "%s" CASCADE' % (self._table, k))
cr.commit()
_schema.debug("Table '%s': dropped column '%s' with cascade",
self._table, k)
f_obj_type = None
else:
f_obj_type = get_pg_type(f) and get_pg_type(f)[0]
if f_obj_type:
ok = False
casts = [
('text', 'char', pg_varchar(f.size), '::%s' % pg_varchar(f.size)),
('varchar', 'text', 'TEXT', ''),
('int4', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('date', 'datetime', 'TIMESTAMP', '::TIMESTAMP'),
('timestamp', 'date', 'date', '::date'),
('numeric', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
('float8', 'float', get_pg_type(f)[1], '::'+get_pg_type(f)[1]),
]
if f_pg_type == 'varchar' and f._type == 'char' and f_pg_size and (f.size is None or f_pg_size < f.size):
try:
with cr.savepoint():
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" TYPE %s' % (self._table, k, pg_varchar(f.size)))
except psycopg2.NotSupportedError:
# An in-place ALTER TABLE cannot be done because a view depends on this field.
# Do a manual copy. This will drop the view (which will be recreated later).
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO temp_change_size' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, pg_varchar(f.size)))
cr.execute('UPDATE "%s" SET "%s"=temp_change_size::%s' % (self._table, k, pg_varchar(f.size)))
cr.execute('ALTER TABLE "%s" DROP COLUMN temp_change_size CASCADE' % (self._table,))
cr.commit()
_schema.debug("Table '%s': column '%s' (type varchar) changed size from %s to %s",
self._table, k, f_pg_size or 'unlimited', f.size or 'unlimited')
for c in casts:
if (f_pg_type==c[0]) and (f._type==c[1]):
if f_pg_type != f_obj_type:
ok = True
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO __temp_type_cast' % (self._table, k))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, c[2]))
cr.execute(('UPDATE "%s" SET "%s"= __temp_type_cast'+c[3]) % (self._table, k))
cr.execute('ALTER TABLE "%s" DROP COLUMN __temp_type_cast CASCADE' % (self._table,))
cr.commit()
_schema.debug("Table '%s': column '%s' changed type from %s to %s",
self._table, k, c[0], c[1])
break
if f_pg_type != f_obj_type:
if not ok:
i = 0
while True:
newname = k + '_moved' + str(i)
cr.execute("SELECT count(1) FROM pg_class c,pg_attribute a " \
"WHERE c.relname=%s " \
"AND a.attname=%s " \
"AND c.oid=a.attrelid ", (self._table, newname))
if not cr.fetchone()[0]:
break
i += 1
if f_pg_notnull:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % (self._table, k, newname))
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
_schema.debug("Table '%s': column '%s' has changed type (DB=%s, def=%s), data moved to column %s !",
self._table, k, f_pg_type, f._type, newname)
# if the field is required and hasn't got a NOT NULL constraint
if f.required and f_pg_notnull == 0:
# set the field to the default value if any
if k in self._defaults:
if callable(self._defaults[k]):
default = self._defaults[k](self, cr, SUPERUSER_ID, context)
else:
default = self._defaults[k]
if default is not None:
ss = self._columns[k]._symbol_set
query = 'UPDATE "%s" SET "%s"=%s WHERE "%s" is NULL' % (self._table, k, ss[0], k)
cr.execute(query, (ss[1](default),))
# add the NOT NULL constraint
cr.commit()
try:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
cr.commit()
_schema.debug("Table '%s': column '%s': added NOT NULL constraint",
self._table, k)
except Exception:
msg = "Table '%s': unable to set a NOT NULL constraint on column '%s' !\n"\
"If you want to have it, you should update the records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
_schema.warning(msg, self._table, k, self._table, k)
cr.commit()
elif not f.required and f_pg_notnull == 1:
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" DROP NOT NULL' % (self._table, k))
cr.commit()
_schema.debug("Table '%s': column '%s': dropped NOT NULL constraint",
self._table, k)
# Verify index
indexname = '%s_%s_index' % (self._table, k)
cr.execute("SELECT indexname FROM pg_indexes WHERE indexname = %s and tablename = %s", (indexname, self._table))
res2 = cr.dictfetchall()
if not res2 and f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
cr.commit()
if f._type == 'text':
# FIXME: for fields.text columns we should try creating GIN indexes instead (seems most suitable for an ERP context)
msg = "Table '%s': Adding (b-tree) index for %s column '%s'."\
"This is probably useless (does not work for fulltext search) and prevents INSERTs of long texts"\
" because there is a length limit for indexable btree values!\n"\
"Use a search view instead if you simply want to make the field searchable."
_schema.warning(msg, self._table, f._type, k)
if res2 and not f.select:
cr.execute('DROP INDEX "%s_%s_index"' % (self._table, k))
cr.commit()
msg = "Table '%s': dropping index for column '%s' of type '%s' as it is not required anymore"
_schema.debug(msg, self._table, k, f._type)
if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
dest_model = self.pool[f._obj]
if dest_model._table != 'ir_actions':
self._m2o_fix_foreign_key(cr, self._table, k, dest_model, f.ondelete)
# The field doesn't exist in database. Create it if necessary.
else:
if not isinstance(f, fields.function) or f.store:
# add the missing field
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, k, get_pg_type(f)[1]))
cr.execute("COMMENT ON COLUMN %s.\"%s\" IS %%s" % (self._table, k), (f.string,))
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, k, get_pg_type(f)[1])
# initialize it
if not create and k in self._defaults:
if callable(self._defaults[k]):
default = self._defaults[k](self, cr, SUPERUSER_ID, context)
else:
default = self._defaults[k]
ss = self._columns[k]._symbol_set
query = 'UPDATE "%s" SET "%s"=%s' % (self._table, k, ss[0])
cr.execute(query, (ss[1](default),))
cr.commit()
_logger.debug("Table '%s': setting default value of new column %s", self._table, k)
# remember the functions to call for the stored fields
if isinstance(f, fields.function):
order = 10
if f.store is not True: # i.e. if f.store is a dict
order = f.store[f.store.keys()[0]][2]
todo_end.append((order, self._update_store, (f, k)))
# and add constraints if needed
if isinstance(f, fields.many2one) or (isinstance(f, fields.function) and f._type == 'many2one' and f.store):
if f._obj not in self.pool:
raise except_orm('Programming Error', 'There is no reference available for %s' % (f._obj,))
dest_model = self.pool[f._obj]
ref = dest_model._table
# ir_actions is inherited so foreign key doesn't work on it
if ref != 'ir_actions':
self._m2o_add_foreign_key_checked(k, dest_model, f.ondelete)
if f.select:
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (self._table, k, self._table, k))
if f.required:
try:
cr.commit()
cr.execute('ALTER TABLE "%s" ALTER COLUMN "%s" SET NOT NULL' % (self._table, k), log_exceptions=False)
_schema.debug("Table '%s': column '%s': added a NOT NULL constraint",
self._table, k)
except Exception:
msg = "WARNING: unable to set column %s of table %s not null !\n"\
"Try to re-run: openerp-server --update=module\n"\
"If it doesn't work, update records and execute manually:\n"\
"ALTER TABLE %s ALTER COLUMN %s SET NOT NULL"
_logger.warning(msg, k, self._table, self._table, k)
cr.commit()
else:
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
create = not bool(cr.fetchone())
cr.commit() # start a new transaction
if self._auto:
self._add_sql_constraints(cr)
if create:
self._execute_sql(cr)
if store_compute:
self._parent_store_compute(cr)
cr.commit()
return todo_end
def _auto_end(self, cr, context=None):
""" Create the foreign keys recorded by _auto_init. """
for t, k, r, d in self._foreign_keys:
cr.execute('ALTER TABLE "%s" ADD FOREIGN KEY ("%s") REFERENCES "%s" ON DELETE %s' % (t, k, r, d))
self._save_constraint(cr, "%s_%s_fkey" % (t, k), 'f')
cr.commit()
del self._foreign_keys
def _table_exist(self, cr):
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (self._table,))
return cr.rowcount
def _create_table(self, cr):
cr.execute('CREATE TABLE "%s" (id SERIAL NOT NULL, PRIMARY KEY(id))' % (self._table,))
cr.execute(("COMMENT ON TABLE \"%s\" IS %%s" % self._table), (self._description,))
_schema.debug("Table '%s': created", self._table)
def _parent_columns_exist(self, cr):
cr.execute("""SELECT c.relname
FROM pg_class c, pg_attribute a
WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
""", (self._table, 'parent_left'))
return cr.rowcount
def _create_parent_columns(self, cr):
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_left" INTEGER' % (self._table,))
cr.execute('ALTER TABLE "%s" ADD COLUMN "parent_right" INTEGER' % (self._table,))
if 'parent_left' not in self._columns:
_logger.error('create a column parent_left on object %s: fields.integer(\'Left Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_left', 'INTEGER')
elif not self._columns['parent_left'].select:
_logger.error('parent_left column on object %s must be indexed! Add select=1 to the field definition.',
self._table)
if 'parent_right' not in self._columns:
_logger.error('create a column parent_right on object %s: fields.integer(\'Right Parent\', select=1)',
self._table)
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, 'parent_right', 'INTEGER')
elif not self._columns['parent_right'].select:
_logger.error('parent_right column on object %s must be indexed! Add select=1 to the field definition.',
self._table)
if self._columns[self._parent_name].ondelete not in ('cascade', 'restrict'):
_logger.error("The column %s on object %s must be set as ondelete='cascade' or 'restrict'",
self._parent_name, self._name)
cr.commit()
def _add_log_columns(self, cr):
for field, field_def in LOG_ACCESS_COLUMNS.iteritems():
cr.execute("""
SELECT c.relname
FROM pg_class c, pg_attribute a
WHERE c.relname=%s AND a.attname=%s AND c.oid=a.attrelid
""", (self._table, field))
if not cr.rowcount:
cr.execute('ALTER TABLE "%s" ADD COLUMN "%s" %s' % (self._table, field, field_def))
cr.commit()
_schema.debug("Table '%s': added column '%s' with definition=%s",
self._table, field, field_def)
def _select_column_data(self, cr):
# attlen is the number of bytes necessary to represent the type when
# the type has a fixed size. If the type has a varying size attlen is
# -1 and atttypmod is the size limit + 4, or -1 if there is no limit.
cr.execute("SELECT c.relname,a.attname,a.attlen,a.atttypmod,a.attnotnull,a.atthasdef,t.typname,CASE WHEN a.attlen=-1 THEN (CASE WHEN a.atttypmod=-1 THEN 0 ELSE a.atttypmod-4 END) ELSE a.attlen END as size " \
"FROM pg_class c,pg_attribute a,pg_type t " \
"WHERE c.relname=%s " \
"AND c.oid=a.attrelid " \
"AND a.atttypid=t.oid", (self._table,))
return dict(map(lambda x: (x['attname'], x),cr.dictfetchall()))
def _o2m_raise_on_missing_reference(self, cr, f):
# TODO this check should be a method on fields.one2many.
if f._obj in self.pool:
other = self.pool[f._obj]
# TODO the condition could use fields_get_keys().
if f._fields_id not in other._columns.keys():
if f._fields_id not in other._inherit_fields.keys():
raise except_orm('Programming Error', "There is no reference field '%s' found for '%s'" % (f._fields_id, f._obj,))
def _m2m_raise_or_create_relation(self, cr, f):
m2m_tbl, col1, col2 = f._sql_names(self)
self._save_relation_table(cr, m2m_tbl)
cr.execute("SELECT relname FROM pg_class WHERE relkind IN ('r','v') AND relname=%s", (m2m_tbl,))
if not cr.dictfetchall():
if f._obj not in self.pool:
raise except_orm('Programming Error', 'Many2Many destination model does not exist: `%s`' % (f._obj,))
dest_model = self.pool[f._obj]
ref = dest_model._table
cr.execute('CREATE TABLE "%s" ("%s" INTEGER NOT NULL, "%s" INTEGER NOT NULL, UNIQUE("%s","%s"))' % (m2m_tbl, col1, col2, col1, col2))
# create foreign key references with ondelete=cascade, unless the targets are SQL views
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (ref,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col2, dest_model, 'cascade')
cr.execute("SELECT relkind FROM pg_class WHERE relkind IN ('v') AND relname=%s", (self._table,))
if not cr.fetchall():
self._m2o_add_foreign_key_unchecked(m2m_tbl, col1, self, 'cascade')
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col1, m2m_tbl, col1))
cr.execute('CREATE INDEX "%s_%s_index" ON "%s" ("%s")' % (m2m_tbl, col2, m2m_tbl, col2))
cr.execute("COMMENT ON TABLE \"%s\" IS 'RELATION BETWEEN %s AND %s'" % (m2m_tbl, self._table, ref))
cr.commit()
_schema.debug("Create table '%s': m2m relation between '%s' and '%s'", m2m_tbl, self._table, ref)
def _add_sql_constraints(self, cr):
"""
Modify this model's database table constraints so they match the one in
_sql_constraints.
"""
def unify_cons_text(txt):
return txt.lower().replace(', ',',').replace(' (','(')
for (key, con, _) in self._sql_constraints:
conname = '%s_%s' % (self._table, key)
self._save_constraint(cr, conname, 'u')
cr.execute("SELECT conname, pg_catalog.pg_get_constraintdef(oid, true) as condef FROM pg_constraint where conname=%s", (conname,))
existing_constraints = cr.dictfetchall()
sql_actions = {
'drop': {
'execute': False,
'query': 'ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (self._table, conname, ),
'msg_ok': "Table '%s': dropped constraint '%s'. Reason: its definition changed from '%%s' to '%s'" % (
self._table, conname, con),
'msg_err': "Table '%s': unable to drop \'%s\' constraint !" % (self._table, con),
'order': 1,
},
'add': {
'execute': False,
'query': 'ALTER TABLE "%s" ADD CONSTRAINT "%s" %s' % (self._table, conname, con,),
'msg_ok': "Table '%s': added constraint '%s' with definition=%s" % (self._table, conname, con),
'msg_err': "Table '%s': unable to add \'%s\' constraint !\n If you want to have it, you should update the records and execute manually:\n%%s" % (
self._table, con),
'order': 2,
},
}
if not existing_constraints:
# constraint does not exist:
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
elif unify_cons_text(con) not in [unify_cons_text(item['condef']) for item in existing_constraints]:
# constraint exists but its definition has changed:
sql_actions['drop']['execute'] = True
sql_actions['drop']['msg_ok'] = sql_actions['drop']['msg_ok'] % (existing_constraints[0]['condef'].lower(), )
sql_actions['add']['execute'] = True
sql_actions['add']['msg_err'] = sql_actions['add']['msg_err'] % (sql_actions['add']['query'], )
# we need to add the constraint:
sql_actions = [item for item in sql_actions.values()]
sql_actions.sort(key=lambda x: x['order'])
for sql_action in [action for action in sql_actions if action['execute']]:
try:
cr.execute(sql_action['query'])
cr.commit()
_schema.debug(sql_action['msg_ok'])
except:
_schema.warning(sql_action['msg_err'])
cr.rollback()
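# Illustrative sketch (hypothetical constraint, not part of the original code): a model declaring
#     _sql_constraints = [('name_uniq', 'unique(name)', 'The name must be unique!')]
# would lead this method to run, roughly,
#     ALTER TABLE "<table>" ADD CONSTRAINT "<table>_name_uniq" unique(name)
# or a DROP followed by an ADD when an older definition of the same constraint already exists.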
def _execute_sql(self, cr):
""" Execute the SQL code from the _sql attribute (if any)."""
if hasattr(self, "_sql"):
for line in self._sql.split(';'):
line2 = line.replace('\n', '').strip()
if line2:
cr.execute(line2)
cr.commit()
#
# Update objects that use this one to update their _inherits fields
#
def _inherits_reload_src(self):
""" Recompute the _inherit_fields mapping on each _inherits'd child model."""
for obj in self.pool.models.values():
if self._name in obj._inherits:
obj._inherits_reload()
def _inherits_reload(self):
""" Recompute the _inherit_fields mapping.
This will also call itself on each inherits'd child model.
"""
res = {}
for table in self._inherits:
other = self.pool[table]
for col in other._columns.keys():
res[col] = (table, self._inherits[table], other._columns[col], table)
for col in other._inherit_fields.keys():
res[col] = (table, self._inherits[table], other._inherit_fields[col][2], other._inherit_fields[col][3])
self._inherit_fields = res
self._all_columns = self._get_column_infos()
self._inherits_reload_src()
def _get_column_infos(self):
"""Returns a dict mapping all fields names (direct fields and
inherited field via _inherits) to a ``column_info`` struct
giving detailed columns """
result = {}
for k, (parent, m2o, col, original_parent) in self._inherit_fields.iteritems():
result[k] = fields.column_info(k, col, parent, m2o, original_parent)
for k, col in self._columns.iteritems():
result[k] = fields.column_info(k, col)
return result
def _inherits_check(self):
for table, field_name in self._inherits.items():
if field_name not in self._columns:
_logger.info('Missing many2one field definition for _inherits reference "%s" in "%s", using default one.', field_name, self._name)
self._columns[field_name] = fields.many2one(table, string="Automatically created field to link to parent %s" % table,
required=True, ondelete="cascade")
elif not self._columns[field_name].required or self._columns[field_name].ondelete.lower() not in ("cascade", "restrict"):
_logger.warning('Field definition for _inherits reference "%s" in "%s" must be marked as "required" with ondelete="cascade" or "restrict", forcing it to required + cascade.', field_name, self._name)
self._columns[field_name].required = True
self._columns[field_name].ondelete = "cascade"
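# Illustrative sketch (hypothetical models, not part of the original code): with
#     _inherits = {'product.template': 'product_tmpl_id'}
# this check creates (or hardens) the required delegation link, roughly
#     'product_tmpl_id': fields.many2one('product.template', required=True, ondelete='cascade')
# so that the parent record can never be left dangling.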
def fields_get(self, cr, user, allfields=None, context=None, write_access=True):
""" Return the definition of each field.
The returned value is a dictionary (indexed by field name) of
dictionaries. The _inherits'd fields are included. The string, help,
and selection (if present) attributes are translated.
:param cr: database cursor
:param user: current user id
:param allfields: list of fields
:param context: context arguments, like lang, time zone
:return: dictionary of field dictionaries, each one describing a field of the business object
:raise AccessError: * if user has no create/write rights on the requested object
"""
if context is None:
context = {}
write_access = self.check_access_rights(cr, user, 'write', raise_exception=False) \
or self.check_access_rights(cr, user, 'create', raise_exception=False)
res = {}
translation_obj = self.pool.get('ir.translation')
for parent in self._inherits:
res.update(self.pool[parent].fields_get(cr, user, allfields, context))
for f, field in self._columns.iteritems():
if (allfields and f not in allfields) or \
(field.groups and not self.user_has_groups(cr, user, groups=field.groups, context=context)):
continue
res[f] = fields.field_to_dict(self, cr, user, field, context=context)
if not write_access:
res[f]['readonly'] = True
res[f]['states'] = {}
if 'lang' in context:
if 'string' in res[f]:
res_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'field', context['lang'])
if res_trans:
res[f]['string'] = res_trans
if 'help' in res[f]:
help_trans = translation_obj._get_source(cr, user, self._name + ',' + f, 'help', context['lang'])
if help_trans:
res[f]['help'] = help_trans
return res
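# Illustrative call (hypothetical model/field names, not part of the original code):
#     defs = self.pool.get('res.partner').fields_get(cr, uid, ['name', 'lang'],
#                                                    context={'lang': 'fr_FR'})
#     # -> {'name': {'type': 'char', 'string': 'Nom', ...}, 'lang': {...}}
# When the user lacks both write and create access, every returned field also
# carries 'readonly': True and an empty 'states' dict, as set above.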
def get_empty_list_help(self, cr, user, help, context=None):
""" Generic method giving the help message displayed when having
no result to display in a list or kanban view. By default it returns
the help given in parameter that is generally the help message
defined in the action.
"""
return help
def check_field_access_rights(self, cr, user, operation, fields, context=None):
"""
Check the user access rights on the given fields. This raises Access
Denied if the user does not have the rights. Otherwise it returns the
fields (as is if the fields is not falsy, or the readable/writable
fields if fields is falsy).
"""
def p(field_name):
"""Predicate to test if the user has access to the given field name."""
# Ignore requested field if it doesn't exist. This is ugly but
# it seems to happen at least with 'name_alias' on res.partner.
if field_name not in self._all_columns:
return True
field = self._all_columns[field_name].column
if user != SUPERUSER_ID and field.groups:
return self.user_has_groups(cr, user, groups=field.groups, context=context)
else:
return True
if not fields:
fields = filter(p, self._all_columns.keys())
else:
filtered_fields = filter(lambda a: not p(a), fields)
if filtered_fields:
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s, fields: %s', operation, user, self._name, ', '.join(filtered_fields))
raise except_orm(
_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. '
'Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
return fields
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
""" Read records with given ids with the given fields
:param cr: database cursor
:param user: current user id
:param ids: id or list of the ids of the records to read
:param fields: optional list of field names to return (default: all fields would be returned)
:type fields: list (example ['field_name_1', ...])
:param context: optional context dictionary - it may contain keys for specifying certain options
like ``context_lang``, ``context_tz`` to alter the results of the call.
A special ``bin_size`` boolean flag may also be passed in the context to request the
value of all fields.binary columns to be returned as the size of the binary instead of its
contents. This can also be selectively overridden by passing a field-specific flag
in the form ``bin_size_XXX: True/False`` where ``XXX`` is the name of the field.
Note: The ``bin_size_XXX`` form is new in OpenERP v6.0.
:return: list of dictionaries((dictionary per record asked)) with requested field values
:rtype: [{'name_of_the_field': value, ...}, ...]
:raise AccessError: * if user has no read rights on the requested object
* if user tries to bypass access rules for read on the requested object
"""
self.check_access_rights(cr, user, 'read')
fields = self.check_field_access_rights(cr, user, 'read', fields)
if isinstance(ids, (int, long)):
select = [ids]
else:
select = ids
select = map(lambda x: isinstance(x, dict) and x['id'] or x, select)
result = self._read_flat(cr, user, select, fields, context, load)
if isinstance(ids, (int, long)):
return result and result[0] or False
return result
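# Illustrative call (hypothetical ids/fields, not part of the original code):
#     recs = model.read(cr, uid, [1, 2], ['name', 'parent_id'], context={'bin_size': True})
#     # with the default load='_classic_read', many2one values come back as
#     # (id, display_name) pairs or False:
#     # [{'id': 1, 'name': ..., 'parent_id': (3, 'Parent name')}, {'id': 2, ...}]
# Passing a single integer instead of a list returns a single dictionary (or False).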
def _read_flat(self, cr, user, ids, fields_to_read, context=None, load='_classic_read'):
if not context:
context = {}
if not ids:
return []
if fields_to_read is None:
fields_to_read = self._columns.keys()
# all inherited fields + all non inherited fields for which the attribute whose name is in load is True
fields_pre = [f for f in fields_to_read if
f == self.CONCURRENCY_CHECK_FIELD
or (f in self._columns and getattr(self._columns[f], '_classic_write'))
] + self._inherits.values()
res = []
if len(fields_pre):
def convert_field(f):
f_qual = '%s."%s"' % (self._table, f) # need fully-qualified references in case len(tables) > 1
if f in ('create_date', 'write_date'):
return "date_trunc('second', %s) as %s" % (f_qual, f)
if f == self.CONCURRENCY_CHECK_FIELD:
if self._log_access:
return "COALESCE(%s.write_date, %s.create_date, (now() at time zone 'UTC'))::timestamp AS %s" % (self._table, self._table, f,)
return "(now() at time zone 'UTC')::timestamp AS %s" % (f,)
if isinstance(self._columns[f], fields.binary) and context.get('bin_size', False):
return 'length(%s) as "%s"' % (f_qual, f)
return f_qual
# Construct a clause for the security rules.
# 'tables' hold the list of tables necessary for the SELECT including the ir.rule clauses,
# or will at least contain self._table.
rule_clause, rule_params, tables = self.pool.get('ir.rule').domain_get(cr, user, self._name, 'read', context=context)
fields_pre2 = map(convert_field, fields_pre)
order_by = self._parent_order or self._order
select_fields = ','.join(fields_pre2 + ['%s.id' % self._table])
query = 'SELECT %s FROM %s WHERE %s.id IN %%s' % (select_fields, ','.join(tables), self._table)
if rule_clause:
query += " AND " + (' OR '.join(rule_clause))
query += " ORDER BY " + order_by
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute(query, [tuple(sub_ids)] + rule_params)
results = cr.dictfetchall()
result_ids = [x['id'] for x in results]
self._check_record_rules_result_count(cr, user, sub_ids, result_ids, 'read', context=context)
res.extend(results)
else:
self.check_access_rule(cr, user, ids, 'read', context=context)
res = map(lambda x: {'id': x}, ids)
if context.get('lang'):
for f in fields_pre:
if f == self.CONCURRENCY_CHECK_FIELD:
continue
if self._columns[f].translate:
ids = [x['id'] for x in res]
#TODO: optimize out of this loop
res_trans = self.pool.get('ir.translation')._get_ids(cr, user, self._name+','+f, 'model', context['lang'], ids)
for r in res:
r[f] = res_trans.get(r['id'], False) or r[f]
for table in self._inherits:
col = self._inherits[table]
cols = [x for x in intersect(self._inherit_fields.keys(), fields_to_read) if x not in self._columns.keys()]
if not cols:
continue
res2 = self.pool[table].read(cr, user, [x[col] for x in res], cols, context, load)
res3 = {}
for r in res2:
res3[r['id']] = r
del r['id']
for record in res:
if not record[col]: # skip if the parent record was deleted from the _inherits table
continue
record.update(res3[record[col]])
if col not in fields_to_read:
del record[col]
# all fields which need to be post-processed by a simple function (symbol_get)
fields_post = filter(lambda x: x in self._columns and self._columns[x]._symbol_get, fields_to_read)
if fields_post:
for r in res:
for f in fields_post:
r[f] = self._columns[f]._symbol_get(r[f])
ids = [x['id'] for x in res]
# all non inherited fields for which the attribute whose name is in load is False
fields_post = filter(lambda x: x in self._columns and not getattr(self._columns[x], load), fields_to_read)
# Compute POST fields
todo = {}
for f in fields_post:
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key, val in todo.items():
if key:
res2 = self._columns[val[0]].get(cr, self, ids, val, user, context=context, values=res)
assert res2 is not None, \
'The function field "%s" on the "%s" model returned None\n' \
'(a dictionary was expected).' % (val[0], self._name)
for pos in val:
for record in res:
if isinstance(res2[record['id']], str): res2[record['id']] = eval(res2[record['id']]) #TOCHECK: why do we get a string instead of a dict in python2.6
multi_fields = res2.get(record['id'],{})
if multi_fields:
record[pos] = multi_fields.get(pos,[])
else:
for f in val:
res2 = self._columns[f].get(cr, self, ids, f, user, context=context, values=res)
for record in res:
if res2:
record[f] = res2[record['id']]
else:
record[f] = []
# Warn about deprecated fields now that fields_pre and fields_post are computed
# Explicitly use list() because we may receive tuples
for f in list(fields_pre) + list(fields_post):
field_column = self._all_columns.get(f) and self._all_columns.get(f).column
if field_column and field_column.deprecated:
_logger.warning('Field %s.%s is deprecated: %s', self._name, f, field_column.deprecated)
readonly = None
for vals in res:
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
if fobj:
groups = fobj.read
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
if type(vals[field]) == type([]):
vals[field] = []
elif type(vals[field]) == type(0.0):
vals[field] = 0
elif type(vals[field]) == type(''):
vals[field] = '=No Permission='
else:
vals[field] = False
if vals[field] is None:
vals[field] = False
return res
# TODO check READ access
def perm_read(self, cr, user, ids, context=None, details=True):
"""
Returns some metadata about the given records.
:param details: if True, \*_uid fields are replaced with the name of the user
:return: list of ownership dictionaries for each requested record
:rtype: list of dictionaries with the following keys:
* id: object id
* create_uid: user who created the record
* create_date: date when the record was created
* write_uid: last user who changed the record
* write_date: date of the last change to the record
* xmlid: XML ID to use to refer to this record (if there is one), in format ``module.name``
"""
if not context:
context = {}
if not ids:
return []
fields = ''
uniq = isinstance(ids, (int, long))
if uniq:
ids = [ids]
fields = ['id']
if self._log_access:
fields += ['create_uid', 'create_date', 'write_uid', 'write_date']
quoted_table = '"%s"' % self._table
fields_str = ",".join('%s.%s'%(quoted_table, field) for field in fields)
query = '''SELECT %s, __imd.module, __imd.name
FROM %s LEFT JOIN ir_model_data __imd
ON (__imd.model = %%s and __imd.res_id = %s.id)
WHERE %s.id IN %%s''' % (fields_str, quoted_table, quoted_table, quoted_table)
cr.execute(query, (self._name, tuple(ids)))
res = cr.dictfetchall()
for r in res:
for key in r:
r[key] = r[key] or False
if details and key in ('write_uid', 'create_uid') and r[key]:
try:
r[key] = self.pool.get('res.users').name_get(cr, user, [r[key]])[0]
except Exception:
pass # Leave the numeric uid there
r['xmlid'] = ("%(module)s.%(name)s" % r) if r['name'] else False
del r['name'], r['module']
if uniq:
return res and res[0] or False
return res
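# Illustrative call (hypothetical id, not part of the original code):
#     meta = model.perm_read(cr, uid, [42])
#     # -> [{'id': 42,
#     #      'create_uid': (1, 'Administrator'), 'create_date': '...',
#     #      'write_uid': (1, 'Administrator'), 'write_date': '...',
#     #      'xmlid': 'some_module.some_record' or False}]
# With details=False the *_uid values stay as raw numeric user ids.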
def _check_concurrency(self, cr, ids, context):
if not context:
return
if not (context.get(self.CONCURRENCY_CHECK_FIELD) and self._log_access):
return
check_clause = "(id = %s AND %s < COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp)"
for sub_ids in cr.split_for_in_conditions(ids):
ids_to_check = []
for id in sub_ids:
id_ref = "%s,%s" % (self._name, id)
update_date = context[self.CONCURRENCY_CHECK_FIELD].pop(id_ref, None)
if update_date:
ids_to_check.extend([id, update_date])
if not ids_to_check:
continue
cr.execute("SELECT id FROM %s WHERE %s" % (self._table, " OR ".join([check_clause]*(len(ids_to_check)/2))), tuple(ids_to_check))
res = cr.fetchone()
if res:
# mention the first one only to keep the error message readable
raise except_orm('ConcurrencyException', _('A document was modified since you last viewed it (%s:%d)') % (self._description, res[0]))
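# Illustrative context (hypothetical values, not part of the original code): a client that
# previously read a record can pass back the timestamp it saw, keyed by "model,id":
#     context = {self.CONCURRENCY_CHECK_FIELD: {'res.partner,42': '2013-01-01 12:00:00'}}
# A later write()/unlink() on that record then raises the ConcurrencyException above
# if the record was modified in the database after that timestamp.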
def _check_record_rules_result_count(self, cr, uid, ids, result_ids, operation, context=None):
"""Verify the returned rows after applying record rules matches
the length of `ids`, and raise an appropriate exception if it does not.
"""
ids, result_ids = set(ids), set(result_ids)
missing_ids = ids - result_ids
if missing_ids:
# Attempt to distinguish record rule restriction vs deleted records,
# to provide a more specific error message - check whether the missing ids still exist in the table.
cr.execute('SELECT id FROM ' + self._table + ' WHERE id IN %s', (tuple(missing_ids),))
forbidden_ids = [x[0] for x in cr.fetchall()]
if forbidden_ids:
# the missing ids are (at least partially) hidden by access rules
if uid == SUPERUSER_ID:
return
_logger.warning('Access Denied by record rules for operation: %s on record ids: %r, uid: %s, model: %s', operation, forbidden_ids, uid, self._name)
raise except_orm(_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % \
(self._description, operation))
else:
# If we get here, the missing_ids are not in the database
if operation in ('read','unlink'):
# No need to warn about deleting an already deleted record.
# And no error when reading a record that was deleted, to prevent spurious
# errors for non-transactional search/read sequences coming from clients
return
_logger.warning('Failed operation on deleted record(s): %s, uid: %s, model: %s', operation, uid, self._name)
raise except_orm(_('Missing document(s)'),
_('One of the documents you are trying to access has been deleted, please try again after refreshing.'))
def check_access_rights(self, cr, uid, operation, raise_exception=True): # no context on purpose.
"""Verifies that the operation given by ``operation`` is allowed for the user
according to the access rights."""
return self.pool.get('ir.model.access').check(cr, uid, self._name, operation, raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
"""Verifies that the operation given by ``operation`` is allowed for the user
according to ir.rules.
:param operation: one of ``write``, ``unlink``
:raise except_orm: * if current ir.rules do not permit this operation.
:return: None if the operation is allowed
"""
if uid == SUPERUSER_ID:
return
if self.is_transient():
# Only one single implicit access rule for transient models: owner only!
# This is ok to hardcode because we assert that TransientModels always
# have log_access enabled so that the create_uid column is always there.
# And even with _inherits, these fields are always present in the local
# table too, so no need for JOINs.
cr.execute("""SELECT distinct create_uid
FROM %s
WHERE id IN %%s""" % self._table, (tuple(ids),))
uids = [x[0] for x in cr.fetchall()]
if len(uids) != 1 or uids[0] != uid:
raise except_orm(_('Access Denied'),
_('For this kind of document, you may only access records you created yourself.\n\n(Document type: %s)') % (self._description,))
else:
where_clause, where_params, tables = self.pool.get('ir.rule').domain_get(cr, uid, self._name, operation, context=context)
if where_clause:
where_clause = ' and ' + ' and '.join(where_clause)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('SELECT ' + self._table + '.id FROM ' + ','.join(tables) +
' WHERE ' + self._table + '.id IN %s' + where_clause,
[sub_ids] + where_params)
returned_ids = [x['id'] for x in cr.dictfetchall()]
self._check_record_rules_result_count(cr, uid, sub_ids, returned_ids, operation, context=context)
def create_workflow(self, cr, uid, ids, context=None):
"""Create a workflow instance for each given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_create(uid, self._name, res_id, cr)
return True
def delete_workflow(self, cr, uid, ids, context=None):
"""Delete the workflow instances bound to the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_delete(uid, self._name, res_id, cr)
return True
def step_workflow(self, cr, uid, ids, context=None):
"""Reevaluate the workflow instances of the given record IDs."""
from openerp import workflow
for res_id in ids:
workflow.trg_write(uid, self._name, res_id, cr)
return True
def signal_workflow(self, cr, uid, ids, signal, context=None):
"""Send given workflow signal and return a dict mapping ids to workflow results"""
from openerp import workflow
result = {}
for res_id in ids:
result[res_id] = workflow.trg_validate(uid, self._name, res_id, signal, cr)
return result
def redirect_workflow(self, cr, uid, old_new_ids, context=None):
""" Rebind the workflow instance bound to the given 'old' record IDs to
the given 'new' IDs. (``old_new_ids`` is a list of pairs ``(old, new)``.
"""
from openerp import workflow
for old_id, new_id in old_new_ids:
workflow.trg_redirect(uid, self._name, old_id, new_id, cr)
return True
def unlink(self, cr, uid, ids, context=None):
"""
Delete records with given ids
:param cr: database cursor
:param uid: current user id
:param ids: id or list of ids
:param context: (optional) context arguments, like lang, time zone
:return: True
:raise AccessError: * if user has no unlink rights on the requested object
* if user tries to bypass access rules for unlink on the requested object
:raise UserError: if the record is default property for other records
"""
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
result_store = self._store_get_values(cr, uid, ids, self._all_columns.keys(), context)
self._check_concurrency(cr, ids, context)
self.check_access_rights(cr, uid, 'unlink')
ir_property = self.pool.get('ir.property')
# Check if the records are used as default properties.
domain = [('res_id', '=', False),
('value_reference', 'in', ['%s,%s' % (self._name, i) for i in ids]),
]
if ir_property.search(cr, uid, domain, context=context):
raise except_orm(_('Error'), _('Unable to delete this document because it is used as a default property'))
# Delete the records' properties.
property_ids = ir_property.search(cr, uid, [('res_id', 'in', ['%s,%s' % (self._name, i) for i in ids])], context=context)
ir_property.unlink(cr, uid, property_ids, context=context)
self.delete_workflow(cr, uid, ids, context=context)
self.check_access_rule(cr, uid, ids, 'unlink', context=context)
pool_model_data = self.pool.get('ir.model.data')
ir_values_obj = self.pool.get('ir.values')
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('delete from ' + self._table + ' ' \
'where id IN %s', (sub_ids,))
# Removing the ir_model_data reference if the record being deleted is a record created by xml/csv file,
# as these are not connected with real database foreign keys, and would be dangling references.
# Note: following steps performed as admin to avoid access rights restrictions, and with no context
# to avoid possible side-effects during admin calls.
# Step 1. Calling unlink of ir_model_data only for the affected IDS
reference_ids = pool_model_data.search(cr, SUPERUSER_ID, [('res_id','in',list(sub_ids)),('model','=',self._name)])
# Step 2. Actually delete the referenced ir_model_data records
if reference_ids:
pool_model_data.unlink(cr, SUPERUSER_ID, reference_ids)
# For the same reason, removing the record relevant to ir_values
ir_value_ids = ir_values_obj.search(cr, uid,
['|',('value','in',['%s,%s' % (self._name, sid) for sid in sub_ids]),'&',('res_id','in',list(sub_ids)),('model','=',self._name)],
context=context)
if ir_value_ids:
ir_values_obj.unlink(cr, uid, ir_value_ids, context=context)
for order, obj_name, store_ids, fields in result_store:
if obj_name == self._name:
effective_store_ids = list(set(store_ids) - set(ids))
else:
effective_store_ids = store_ids
if effective_store_ids:
obj = self.pool[obj_name]
cr.execute('select id from '+obj._table+' where id IN %s', (tuple(effective_store_ids),))
rids = map(lambda x: x[0], cr.fetchall())
if rids:
obj._store_set_values(cr, uid, rids, fields, context)
return True
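# Illustrative call (hypothetical ids, not part of the original code):
#     model.unlink(cr, uid, [7, 8], context=context)
# removes the rows together with their ir.model.data / ir.values references and their
# ir.property records, then recomputes the stored function fields collected above.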
#
# TODO: Validate
#
def write(self, cr, user, ids, vals, context=None):
"""
Update records with given ids with the given field values
:param cr: database cursor
:param user: current user id
:type user: integer
:param ids: object id or list of object ids to update according to **vals**
:param vals: field values to update, e.g {'field_name': new_field_value, ...}
:type vals: dictionary
:param context: (optional) context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
:type context: dictionary
:return: True
:raise AccessError: * if user has no write rights on the requested object
* if user tries to bypass access rules for write on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
**Note**: The type of field values to pass in ``vals`` for relationship fields is specific:
+ For a many2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
(3, ID) cut the link to the linked record with id = ID (delete the relationship between the two objects but does not delete the target object itself)
(4, ID) link to existing record with id = ID (adds a relationship)
(5) unlink all (like using (3,ID) for all linked records)
(6, 0, [IDs]) replace the list of linked IDs (like using (5) then (4,ID) for each ID in the list of IDs)
Example:
[(6, 0, [8, 5, 6, 4])] sets the many2many to ids [8, 5, 6, 4]
+ For a one2many field, a list of tuples is expected.
Here is the list of tuples that are accepted, with the corresponding semantics ::
(0, 0, { values }) link to a new record that needs to be created with the given values dictionary
(1, ID, { values }) update the linked record with id = ID (write *values* on it)
(2, ID) remove and delete the linked record with id = ID (calls unlink on ID, that will delete the object completely, and the link to it as well)
Example:
[(0, 0, {'field_name':field_value_record1, ...}), (0, 0, {'field_name':field_value_record2, ...})]
+ For a many2one field, simply use the ID of target record, which must already exist, or ``False`` to remove the link.
+ For a reference field, use a string with the model name, a comma, and the target object id (example: ``'product.product, 5'``)
"""
readonly = None
self.check_field_access_rights(cr, user, 'write', vals.keys())
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
elif field in self._inherit_fields:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name=%s and module=%s and model=%s) and uid=%s", \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
if not edit:
vals.pop(field)
if not context:
context = {}
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
self._check_concurrency(cr, ids, context)
self.check_access_rights(cr, user, 'write')
result = self._store_get_values(cr, user, ids, vals.keys(), context) or []
# No direct update of parent_left/right
vals.pop('parent_left', None)
vals.pop('parent_right', None)
parents_changed = []
parent_order = self._parent_order or self._order
if self._parent_store and (self._parent_name in vals):
# The parent_left/right computation may take up to
# 5 seconds. No need to recompute the values if the
# parent is the same.
# Note: to respect parent_order, nodes must be processed in
# order, so ``parents_changed`` must be ordered properly.
parent_val = vals[self._parent_name]
if parent_val:
query = "SELECT id FROM %s WHERE id IN %%s AND (%s != %%s OR %s IS NULL) ORDER BY %s" % \
(self._table, self._parent_name, self._parent_name, parent_order)
cr.execute(query, (tuple(ids), parent_val))
else:
query = "SELECT id FROM %s WHERE id IN %%s AND (%s IS NOT NULL) ORDER BY %s" % \
(self._table, self._parent_name, parent_order)
cr.execute(query, (tuple(ids),))
parents_changed = map(operator.itemgetter(0), cr.fetchall())
upd0 = []
upd1 = []
upd_todo = []
updend = []
direct = []
totranslate = context.get('lang', False) and (context['lang'] != 'en_US')
for field in vals:
field_column = self._all_columns.get(field) and self._all_columns.get(field).column
if field_column and field_column.deprecated:
_logger.warning('Field %s.%s is deprecated: %s', self._name, field, field_column.deprecated)
if field in self._columns:
if self._columns[field]._classic_write and not (hasattr(self._columns[field], '_fnct_inv')):
if (not totranslate) or not self._columns[field].translate:
upd0.append('"'+field+'"='+self._columns[field]._symbol_set[0])
upd1.append(self._columns[field]._symbol_set[1](vals[field]))
direct.append(field)
else:
upd_todo.append(field)
else:
updend.append(field)
if field in self._columns \
and hasattr(self._columns[field], 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
upd0.append('write_uid=%s')
upd0.append("write_date=(now() at time zone 'UTC')")
upd1.append(user)
if len(upd0):
self.check_access_rule(cr, user, ids, 'write', context=context)
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('update ' + self._table + ' set ' + ','.join(upd0) + ' ' \
'where id IN %s', upd1 + [sub_ids])
if cr.rowcount != len(sub_ids):
raise except_orm(_('AccessError'),
_('One of the records you are trying to modify has already been deleted (Document type: %s).') % self._description)
if totranslate:
# TODO: optimize
for f in direct:
if self._columns[f].translate:
src_trans = self.pool[self._name].read(cr, user, ids, [f])[0][f]
if not src_trans:
src_trans = vals[f]
# Inserting value to DB
context_wo_lang = dict(context, lang=None)
self.write(cr, user, ids, {f: vals[f]}, context=context_wo_lang)
self.pool.get('ir.translation')._set_ids(cr, user, self._name+','+f, 'model', context['lang'], ids, vals[f], src_trans)
# call the 'set' method of fields which are not classic_write
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
# default element in context must be removed when calling a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
for field in upd_todo:
for id in ids:
result += self._columns[field].set(cr, self, id, field, vals[field], user, context=rel_context) or []
unknown_fields = updend[:]
for table in self._inherits:
col = self._inherits[table]
nids = []
for sub_ids in cr.split_for_in_conditions(ids):
cr.execute('select distinct "'+col+'" from "'+self._table+'" ' \
'where id IN %s', (sub_ids,))
nids.extend([x[0] for x in cr.fetchall()])
v = {}
for val in updend:
if self._inherit_fields[val][0] == table:
v[val] = vals[val]
unknown_fields.remove(val)
if v:
self.pool[table].write(cr, user, nids, v, context)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
self._validate(cr, user, ids, context)
# TODO: use _order to set dest at the right position and not first node of parent
# We can't defer parent_store computation because the stored function
# fields that are computed may refer (directly or indirectly) to
# parent_left/right (via a child_of domain)
if parents_changed:
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
order = self._parent_order or self._order
parent_val = vals[self._parent_name]
if parent_val:
clause, params = '%s=%%s' % (self._parent_name,), (parent_val,)
else:
clause, params = '%s IS NULL' % (self._parent_name,), ()
for id in parents_changed:
cr.execute('SELECT parent_left, parent_right FROM %s WHERE id=%%s' % (self._table,), (id,))
pleft, pright = cr.fetchone()
distance = pright - pleft + 1
# Positions of current siblings, to locate proper insertion point;
# this can _not_ be fetched outside the loop, as it needs to be refreshed
# after each update, in case several nodes are sequentially inserted one
# next to the other (i.e computed incrementally)
cr.execute('SELECT parent_right, id FROM %s WHERE %s ORDER BY %s' % (self._table, clause, parent_order), params)
parents = cr.fetchall()
# Find Position of the element
position = None
for (parent_pright, parent_id) in parents:
if parent_id == id:
break
position = parent_pright and parent_pright + 1 or 1
# It's the first node of the parent
if not position:
if not parent_val:
position = 1
else:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent_val,))
position = cr.fetchone()[0] + 1
if pleft < position <= pright:
raise except_orm(_('UserError'), _('Recursivity Detected.'))
if pleft < position:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left+%s, parent_right=parent_right+%s where parent_left>=%s and parent_left<%s', (position-pleft, position-pleft, pleft, pright))
else:
cr.execute('update '+self._table+' set parent_left=parent_left+%s where parent_left>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_right=parent_right+%s where parent_right>=%s', (distance, position))
cr.execute('update '+self._table+' set parent_left=parent_left-%s, parent_right=parent_right-%s where parent_left>=%s and parent_left<%s', (pleft-position+distance, pleft-position+distance, pleft+distance, pright+distance))
result += self._store_get_values(cr, user, ids, vals.keys(), context)
result.sort()
done = {}
for order, model_name, ids_to_update, fields_to_recompute in result:
key = (model_name, tuple(fields_to_recompute))
done.setdefault(key, {})
# avoid to do several times the same computation
todo = []
for id in ids_to_update:
if id not in done[key]:
done[key][id] = True
todo.append(id)
self.pool[model_name]._store_set_values(cr, user, todo, fields_to_recompute, context)
self.step_workflow(cr, user, ids, context=context)
return True
#
# TODO: Should set perm to user.xxx
#
def create(self, cr, user, vals, context=None):
"""
Create a new record for the model.
The values for the new record are initialized using the ``vals``
argument, and if necessary the result of ``default_get()``.
:param cr: database cursor
:param user: current user id
:type user: integer
:param vals: field values for new record, e.g {'field_name': field_value, ...}
:type vals: dictionary
:param context: optional context arguments, e.g. {'lang': 'en_us', 'tz': 'UTC', ...}
:type context: dictionary
:return: id of new record created
:raise AccessError: * if user has no create rights on the requested object
* if user tries to bypass access rules for create on the requested object
:raise ValidateError: if user tries to enter invalid value for a field that is not in selection
:raise UserError: if a loop would be created in a hierarchy of objects as a result of the operation (such as setting an object as its own parent)
**Note**: The type of field values to pass in ``vals`` for relationship fields is specific.
Please see the description of the :py:meth:`~osv.osv.osv.write` method for details about the possible values and how
to specify them.
"""
if not context:
context = {}
if self.is_transient():
self._transient_vacuum(cr, user)
self.check_access_rights(cr, user, 'create')
if self._log_access:
for f in LOG_ACCESS_COLUMNS:
if vals.pop(f, None) is not None:
_logger.warning(
'Field `%s` is not allowed when creating the model `%s`.',
f, self._name)
vals = self._add_missing_default_values(cr, user, vals, context)
tocreate = {}
for v in self._inherits:
if self._inherits[v] not in vals:
tocreate[v] = {}
else:
tocreate[v] = {'id': vals[self._inherits[v]]}
columns = [
# columns will contain a list of field defined as a tuple
# tuple(field_name, format_string, field_value)
# the tuple will be used by the string formatting for the INSERT
# statement.
('id', "nextval('%s')" % self._sequence),
]
upd_todo = []
unknown_fields = []
for v in vals.keys():
if v in self._inherit_fields and v not in self._columns:
(table, col, col_detail, original_parent) = self._inherit_fields[v]
tocreate[table][v] = vals[v]
del vals[v]
else:
if (v not in self._inherit_fields) and (v not in self._columns):
del vals[v]
unknown_fields.append(v)
if unknown_fields:
_logger.warning(
'No such field(s) in model %s: %s.',
self._name, ', '.join(unknown_fields))
if not self._sequence:
raise except_orm(
_('UserError'),
_('You cannot perform this operation. New Record Creation is not allowed for this object as this object is for reporting purpose.')
)
for table in tocreate:
if self._inherits[table] in vals:
del vals[self._inherits[table]]
record_id = tocreate[table].pop('id', None)
# When linking/creating parent records, force context without 'no_store_function' key that
# defers stored functions computing, as these won't be computed in batch at the end of create().
parent_context = dict(context)
parent_context.pop('no_store_function', None)
if record_id is None or not record_id:
record_id = self.pool[table].create(cr, user, tocreate[table], context=parent_context)
else:
self.pool[table].write(cr, user, [record_id], tocreate[table], context=parent_context)
columns.append((self._inherits[table], '%s', record_id))
# Start: set boolean fields to False if they are not provided (to make searching on them reliable)
bool_fields = [x for x in self._columns.keys() if self._columns[x]._type=='boolean']
for bool_field in bool_fields:
if bool_field not in vals:
vals[bool_field] = False
#End
for field in vals.copy():
fobj = None
if field in self._columns:
fobj = self._columns[field]
else:
fobj = self._inherit_fields[field][2]
if not fobj:
continue
groups = fobj.write
if groups:
edit = False
for group in groups:
module = group.split(".")[0]
grp = group.split(".")[1]
cr.execute("select count(*) from res_groups_users_rel where gid IN (select res_id from ir_model_data where name='%s' and module='%s' and model='%s') and uid=%s" % \
(grp, module, 'res.groups', user))
readonly = cr.fetchall()
if readonly[0][0] >= 1:
edit = True
break
elif readonly[0][0] == 0:
edit = False
else:
edit = False
if not edit:
vals.pop(field)
for field in vals:
current_field = self._columns[field]
if current_field._classic_write:
columns.append((field, '%s', current_field._symbol_set[1](vals[field])))
#for the function fields that receive a value, we set them directly in the database
#(they may be required), but we also need to trigger the _fnct_inv()
if (hasattr(current_field, '_fnct_inv')) and not isinstance(current_field, fields.related):
#TODO: this way of special-casing the related fields is really creepy, but it shouldn't be changed
#one week before the release candidate. It seems the only good way to handle this correctly is to add an
#attribute to make a field 'really readonly' and thus totally ignored by create()... otherwise
#if, for example, the related field has a default value (for usability) then the fnct_inv is called and it
#may raise some access rights error. Changing this is too big a change for now, and is thus postponed
#until after the release but, definitively, the behavior shouldn't be different for related and function
#fields.
upd_todo.append(field)
else:
#TODO: this 'if' statement should be removed because there is no good reason to special-case the
#related fields. See the above TODO comment for further explanations.
if not isinstance(current_field, fields.related):
upd_todo.append(field)
if field in self._columns \
and hasattr(current_field, 'selection') \
and vals[field]:
self._check_selection_field_value(cr, user, field, vals[field], context=context)
if self._log_access:
columns.append(('create_uid', '%s', user))
columns.append(('write_uid', '%s', user))
columns.append(('create_date', "(now() at time zone 'UTC')"))
columns.append(('write_date', "(now() at time zone 'UTC')"))
# the list of tuples used in this formatting corresponds to
# tuple(field_name, format, value)
# In some cases, for example (id, create_date, write_date), we do not
# need to read the third value of the tuple, because the real value is
# encoded in the second value (the format).
cr.execute(
"""INSERT INTO "%s" (%s) VALUES(%s) RETURNING id""" % (
self._table,
', '.join('"%s"' % f[0] for f in columns),
', '.join(f[1] for f in columns)
),
tuple([f[2] for f in columns if len(f) > 2])
)
id_new, = cr.fetchone()
upd_todo.sort(lambda x, y: self._columns[x].priority-self._columns[y].priority)
if self._parent_store and not context.get('defer_parent_store_computation'):
if self.pool._init:
self.pool._init_parent[self._name] = True
else:
parent = vals.get(self._parent_name, False)
if parent:
cr.execute('select parent_right from '+self._table+' where '+self._parent_name+'=%s order by '+(self._parent_order or self._order), (parent,))
pleft_old = None
result_p = cr.fetchall()
for (pleft,) in result_p:
if not pleft:
break
pleft_old = pleft
if not pleft_old:
cr.execute('select parent_left from '+self._table+' where id=%s', (parent,))
pleft_old = cr.fetchone()[0]
pleft = pleft_old
else:
cr.execute('select max(parent_right) from '+self._table)
pleft = cr.fetchone()[0] or 0
cr.execute('update '+self._table+' set parent_left=parent_left+2 where parent_left>%s', (pleft,))
cr.execute('update '+self._table+' set parent_right=parent_right+2 where parent_right>%s', (pleft,))
cr.execute('update '+self._table+' set parent_left=%s,parent_right=%s where id=%s', (pleft+1, pleft+2, id_new))
# default element in context must be removed when calling a one2many or many2many
rel_context = context.copy()
for c in context.items():
if c[0].startswith('default_'):
del rel_context[c[0]]
result = []
for field in upd_todo:
result += self._columns[field].set(cr, self, id_new, field, vals[field], user, rel_context) or []
self._validate(cr, user, [id_new], context)
if not context.get('no_store_function', False):
result += self._store_get_values(cr, user, [id_new],
list(set(vals.keys() + self._inherits.values())),
context)
result.sort()
done = []
for order, model_name, ids, fields2 in result:
if not (model_name, ids, fields2) in done:
self.pool[model_name]._store_set_values(cr, user, ids, fields2, context)
done.append((model_name, ids, fields2))
if self._log_create and not (context and context.get('no_store_function', False)):
message = self._description + \
" '" + \
self.name_get(cr, user, [id_new], context=context)[0][1] + \
"' " + _("created.")
self.log(cr, user, id_new, message, True, context=context)
self.check_access_rule(cr, user, [id_new], 'create', context=context)
self.create_workflow(cr, user, [id_new], context=context)
return id_new
def browse(self, cr, uid, select, context=None, list_class=None, fields_process=None):
"""Fetch records as objects allowing to use dot notation to browse fields and relations
:param cr: database cursor
:param uid: current user id
:param select: id or list of ids.
:param context: context arguments, like lang, time zone
:rtype: object or list of objects requested
"""
self._list_class = list_class or browse_record_list
cache = {}
# need to accept ints and longs because ids coming from a method
# launched by a button in the interface have type long...
if isinstance(select, (int, long)):
return browse_record(cr, uid, select, self, cache, context=context, list_class=self._list_class, fields_process=fields_process)
elif isinstance(select, list):
return self._list_class((browse_record(cr, uid, id, self, cache, context=context, list_class=self._list_class, fields_process=fields_process) for id in select), context=context)
else:
return browse_null()
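# Illustrative usage (hypothetical field names, not part of the original code):
# dot notation follows relations lazily, sharing the per-call cache built above:
#     partner = self.pool.get('res.partner').browse(cr, uid, 42, context=context)
#     country_name = partner.country_id.name   # fetched on access, cached for this browse()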
def _store_get_values(self, cr, uid, ids, fields, context):
"""Returns an ordered list of fields.function to call due to
an update operation on ``fields`` of records with ``ids``,
obtained by calling the 'store' triggers of these fields,
as setup by their 'store' attribute.
:return: [(priority, model_name, [record_ids,], [function_fields,])]
"""
if fields is None: fields = []
stored_functions = self.pool._store_function.get(self._name, [])
# use indexed names for the details of the stored_functions:
model_name_, func_field_to_compute_, target_ids_func_, trigger_fields_, priority_ = range(5)
# only keep store triggers that should be triggered for the ``fields``
# being written to.
triggers_to_compute = [f for f in stored_functions \
if ((not f[trigger_fields_]) or set(fields).intersection(f[trigger_fields_]))]
to_compute_map = {}
target_id_results = {}
for store_trigger in triggers_to_compute:
target_func_id_ = id(store_trigger[target_ids_func_])
if not target_func_id_ in target_id_results:
# use admin user for accessing objects having rules defined on store fields
target_id_results[target_func_id_] = [i for i in store_trigger[target_ids_func_](self, cr, SUPERUSER_ID, ids, context) if i]
target_ids = target_id_results[target_func_id_]
# the compound key must consider the priority and model name
key = (store_trigger[priority_], store_trigger[model_name_])
for target_id in target_ids:
to_compute_map.setdefault(key, {}).setdefault(target_id,set()).add(tuple(store_trigger))
# Here to_compute_map looks like:
# { (10, 'model_a') : { target_id1: [ (trigger_1_tuple, trigger_2_tuple) ], ... }
# (20, 'model_a') : { target_id2: [ (trigger_3_tuple, trigger_4_tuple) ], ... }
# (99, 'model_a') : { target_id1: [ (trigger_5_tuple, trigger_6_tuple) ], ... }
# }
# Now we need to generate the batch function calls list
# call_map =
# { (10, 'model_a') : [(10, 'model_a', [record_ids,], [function_fields,])] }
call_map = {}
for ((priority,model), id_map) in to_compute_map.iteritems():
trigger_ids_maps = {}
# trigger_ids_maps =
# { (trigger_1_tuple, trigger_2_tuple) : [target_id1, target_id2, ..] }
for target_id, triggers in id_map.iteritems():
trigger_ids_maps.setdefault(tuple(triggers), []).append(target_id)
for triggers, target_ids in trigger_ids_maps.iteritems():
call_map.setdefault((priority,model),[]).append((priority, model, target_ids,
[t[func_field_to_compute_] for t in triggers]))
ordered_keys = call_map.keys()
ordered_keys.sort()
result = []
if ordered_keys:
result = reduce(operator.add, (call_map[k] for k in ordered_keys))
return result
def _store_set_values(self, cr, uid, ids, fields, context):
"""Calls the fields.function's "implementation function" for all ``fields``, on records with ``ids`` (taking care of
respecting ``multi`` attributes), and stores the resulting values in the database directly."""
if not ids:
return True
field_flag = False
field_dict = {}
if self._log_access:
cr.execute('select id,write_date from '+self._table+' where id IN %s', (tuple(ids),))
res = cr.fetchall()
for r in res:
if r[1]:
field_dict.setdefault(r[0], [])
res_date = time.strptime((r[1])[:19], '%Y-%m-%d %H:%M:%S')
write_date = datetime.datetime.fromtimestamp(time.mktime(res_date))
for i in self.pool._store_function.get(self._name, []):
if i[5]:
up_write_date = write_date + datetime.timedelta(hours=i[5])
if datetime.datetime.now() < up_write_date:
if i[1] in fields:
field_dict[r[0]].append(i[1])
if not field_flag:
field_flag = True
todo = {}
keys = []
for f in fields:
if self._columns[f]._multi not in keys:
keys.append(self._columns[f]._multi)
todo.setdefault(self._columns[f]._multi, [])
todo[self._columns[f]._multi].append(f)
for key in keys:
val = todo[key]
if key:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[val[0]].get(cr, self, ids, val, SUPERUSER_ID, context=context)
for id, value in result.items():
if field_flag:
for f in value.keys():
if f in field_dict[id]:
value.pop(f)
upd0 = []
upd1 = []
for v in value:
if v not in val:
continue
if self._columns[v]._type == 'many2one':
try:
value[v] = value[v][0]
except:
pass
upd0.append('"'+v+'"='+self._columns[v]._symbol_set[0])
upd1.append(self._columns[v]._symbol_set[1](value[v]))
upd1.append(id)
if upd0 and upd1:
cr.execute('update "' + self._table + '" set ' + \
','.join(upd0) + ' where id = %s', upd1)
else:
for f in val:
# use admin user for accessing objects having rules defined on store fields
result = self._columns[f].get(cr, self, ids, f, SUPERUSER_ID, context=context)
for r in result.keys():
if field_flag:
if r in field_dict.keys():
if f in field_dict[r]:
result.pop(r)
for id, value in result.items():
if self._columns[f]._type == 'many2one':
try:
value = value[0]
except:
pass
cr.execute('update "' + self._table + '" set ' + \
'"'+f+'"='+self._columns[f]._symbol_set[0] + ' where id = %s', (self._columns[f]._symbol_set[1](value), id))
return True
#
# TODO: Validate
#
def perm_write(self, cr, user, ids, fields, context=None):
raise NotImplementedError(_('This method does not exist anymore'))
# TODO: improve handling of NULL values
def _where_calc(self, cr, user, domain, active_test=True, context=None):
"""Computes the WHERE clause needed to implement an OpenERP domain.
:param domain: the domain to compute
:type domain: list
:param active_test: whether the default filtering of records with ``active``
field set to ``False`` should be applied.
:return: the query expressing the given domain as provided in domain
:rtype: osv.query.Query
"""
if not context:
context = {}
domain = domain[:]
# if the object has a field named 'active', filter out all inactive
# records unless they were explicitly asked for
if 'active' in self._all_columns and (active_test and context.get('active_test', True)):
if domain:
# the item[0] trick below works for domain items and '&'/'|'/'!'
# operators too
if not any(item[0] == 'active' for item in domain):
domain.insert(0, ('active', '=', 1))
else:
domain = [('active', '=', 1)]
if domain:
e = expression.expression(cr, user, domain, self, context)
tables = e.get_tables()
where_clause, where_params = e.to_sql()
where_clause = where_clause and [where_clause] or []
else:
where_clause, where_params, tables = [], [], ['"%s"' % self._table]
return Query(tables, where_clause, where_params)
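# Illustrative sketch (hypothetical domain, not part of the original code): on a model
# with an 'active' column,
#     query = self._where_calc(cr, uid, [('name', 'ilike', 'foo')])
# returns a Query object whose where_clause combines the implicit ('active', '=', 1)
# filter prepended above with the name condition, ready to be turned into SQL via
# query.get_sql().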
def _check_qorder(self, word):
if not regex_order.match(word):
raise except_orm(_('AccessError'), _('Invalid "order" specified. A valid "order" specification is a comma-separated list of valid field names (optionally followed by asc/desc for the direction)'))
return True
def _apply_ir_rules(self, cr, uid, query, mode='read', context=None):
"""Add what's missing in ``query`` to implement all appropriate ir.rules
(using the ``model_name``'s rules or the current model's rules if ``model_name`` is None)
:param query: the current query object
"""
if uid == SUPERUSER_ID:
return
def apply_rule(added_clause, added_params, added_tables, parent_model=None, child_object=None):
""" :param string parent_model: string of the parent model
:param model child_object: model object, base of the rule application
"""
if added_clause:
if parent_model and child_object:
# as inherited rules are being applied, we need to add the missing JOIN
# to reach the parent table (if it was not JOINed yet in the query)
parent_alias = child_object._inherits_join_add(child_object, parent_model, query)
# inherited rules are applied on the external table -> need to get the alias and replace
parent_table = self.pool[parent_model]._table
added_clause = [clause.replace('"%s"' % parent_table, '"%s"' % parent_alias) for clause in added_clause]
# change references to parent_table to parent_alias, because we now use the alias to refer to the table
new_tables = []
for table in added_tables:
# table is just a table name -> switch to the full alias
if table == '"%s"' % parent_table:
new_tables.append('"%s" as "%s"' % (parent_table, parent_alias))
# table is already a full statement -> replace the reference to the table with its alias, which is correct given the way aliases are generated
else:
new_tables.append(table.replace('"%s"' % parent_table, '"%s"' % parent_alias))
added_tables = new_tables
query.where_clause += added_clause
query.where_clause_params += added_params
for table in added_tables:
if table not in query.tables:
query.tables.append(table)
return True
return False
# apply main rules on the object
rule_obj = self.pool.get('ir.rule')
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, self._name, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables)
# apply ir.rules from the parents (through _inherits)
for inherited_model in self._inherits:
rule_where_clause, rule_where_clause_params, rule_tables = rule_obj.domain_get(cr, uid, inherited_model, mode, context=context)
apply_rule(rule_where_clause, rule_where_clause_params, rule_tables,
parent_model=inherited_model, child_object=self)
def _generate_m2o_order_by(self, order_field, query):
"""
Add possibly missing JOIN to ``query`` and generate the ORDER BY clause for m2o fields,
either native m2o fields or function/related fields that are stored, including
intermediate JOINs for inheritance if required.
:return: the qualified field name to use in an ORDER BY clause to sort by ``order_field``
"""
if order_field not in self._columns and order_field in self._inherit_fields:
# also add missing joins for reaching the table containing the m2o field
qualified_field = self._inherits_join_calc(order_field, query)
order_field_column = self._inherit_fields[order_field][2]
else:
qualified_field = '"%s"."%s"' % (self._table, order_field)
order_field_column = self._columns[order_field]
assert order_field_column._type == 'many2one', 'Invalid field passed to _generate_m2o_order_by()'
if not order_field_column._classic_write and not getattr(order_field_column, 'store', False):
_logger.debug("Many2one function/related fields must be stored " \
"to be used as ordering fields! Ignoring sorting for %s.%s",
self._name, order_field)
return
# figure out the applicable order_by for the m2o
dest_model = self.pool[order_field_column._obj]
m2o_order = dest_model._order
if not regex_order.match(m2o_order):
# _order is complex, can't use it here, so we default to _rec_name
m2o_order = dest_model._rec_name
else:
# extract the field names, to be able to qualify them and add desc/asc
m2o_order_list = []
for order_part in m2o_order.split(","):
m2o_order_list.append(order_part.strip().split(" ", 1)[0].strip())
m2o_order = m2o_order_list
# Join the dest m2o table if it's not joined yet. We use [LEFT] OUTER join here
# as we don't want to exclude results that have NULL values for the m2o
src_table, src_field = qualified_field.replace('"', '').split('.', 1)
dst_alias, dst_alias_statement = query.add_join((src_table, dest_model._table, src_field, 'id', src_field), implicit=False, outer=True)
qualify = lambda field: '"%s"."%s"' % (dst_alias, field)
return map(qualify, m2o_order) if isinstance(m2o_order, list) else qualify(m2o_order)
def _generate_order_by(self, order_spec, query):
"""
Attempt to construct an appropriate ORDER BY clause based on order_spec, which must be
a comma-separated list of valid field names, optionally followed by an ASC or DESC direction.
:raise: except_orm in case order_spec is malformed
"""
order_by_clause = ''
order_spec = order_spec or self._order
if order_spec:
order_by_elements = []
self._check_qorder(order_spec)
for order_part in order_spec.split(','):
order_split = order_part.strip().split(' ')
order_field = order_split[0].strip()
order_direction = order_split[1].strip() if len(order_split) == 2 else ''
inner_clause = None
if order_field == 'id' or (self._log_access and order_field in LOG_ACCESS_COLUMNS.keys()):
order_by_elements.append('"%s"."%s" %s' % (self._table, order_field, order_direction))
elif order_field in self._columns:
order_column = self._columns[order_field]
if order_column._classic_read:
inner_clause = '"%s"."%s"' % (self._table, order_field)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
elif order_field in self._inherit_fields:
parent_obj = self.pool[self._inherit_fields[order_field][3]]
order_column = parent_obj._columns[order_field]
if order_column._classic_read:
inner_clause = self._inherits_join_calc(order_field, query)
elif order_column._type == 'many2one':
inner_clause = self._generate_m2o_order_by(order_field, query)
else:
continue # ignore non-readable or "non-joinable" fields
else:
raise ValueError( _("Sorting field %s not found on model %s") %( order_field, self._name))
if inner_clause:
if isinstance(inner_clause, list):
for clause in inner_clause:
order_by_elements.append("%s %s" % (clause, order_direction))
else:
order_by_elements.append("%s %s" % (inner_clause, order_direction))
if order_by_elements:
order_by_clause = ",".join(order_by_elements)
return order_by_clause and (' ORDER BY %s ' % order_by_clause) or ''
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
"""
Private implementation of search() method, allowing specifying the uid to use for the access right check.
This is useful for example when filling in the selection list for a drop-down and avoiding access rights errors,
by specifying ``access_rights_uid=1`` to bypass access rights check, but not ir.rules!
This is ok at the security level because this method is private and not callable through XML-RPC.
:param access_rights_uid: optional user ID to use when checking access rights
(not for ir.rules, this is only for ir.model.access)
"""
if context is None:
context = {}
self.check_access_rights(cr, access_rights_uid or user, 'read')
        # For transient models, restrict access to the current user, except for the super-user
if self.is_transient() and self._log_access and user != SUPERUSER_ID:
args = expression.AND(([('create_uid', '=', user)], args or []))
query = self._where_calc(cr, user, args, context=context)
self._apply_ir_rules(cr, user, query, 'read', context=context)
order_by = self._generate_order_by(order, query)
from_clause, where_clause, where_clause_params = query.get_sql()
limit_str = limit and ' limit %d' % limit or ''
offset_str = offset and ' offset %d' % offset or ''
where_str = where_clause and (" WHERE %s" % where_clause) or ''
query_str = 'SELECT "%s".id FROM ' % self._table + from_clause + where_str + order_by + limit_str + offset_str
if count:
# /!\ the main query must be executed as a subquery, otherwise
# offset and limit apply to the result of count()!
cr.execute('SELECT count(*) FROM (%s) AS count' % query_str, where_clause_params)
res = cr.fetchone()
return res[0]
cr.execute(query_str, where_clause_params)
res = cr.fetchall()
# TDE note: with auto_join, we could have several lines about the same result
# i.e. a lead with several unread messages; we uniquify the result using
# a fast way to do it while preserving order (http://www.peterbe.com/plog/uniqifiers-benchmark)
def _uniquify_list(seq):
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
return _uniquify_list([x[0] for x in res])
# returns the different values ever entered for one field
# this is used, for example, in the client when the user hits enter on
# a char field
def distinct_field_get(self, cr, uid, field, value, args=None, offset=0, limit=None):
if not args:
args = []
if field in self._inherit_fields:
return self.pool[self._inherit_fields[field][0]].distinct_field_get(cr, uid, field, value, args, offset, limit)
else:
return self._columns[field].search(cr, self, args, field, value, offset, limit, uid)
def copy_data(self, cr, uid, id, default=None, context=None):
"""
Copy given record's data with all its fields values
:param cr: database cursor
:param uid: current user id
:param id: id of the record to copy
:param default: field values to override in the original values of the copied record
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
:return: dictionary containing all the field values
"""
if context is None:
context = {}
# avoid recursion through already copied records in case of circular relationship
seen_map = context.setdefault('__copy_data_seen', {})
if id in seen_map.setdefault(self._name, []):
return
seen_map[self._name].append(id)
if default is None:
default = {}
if 'state' not in default:
if 'state' in self._defaults:
if callable(self._defaults['state']):
default['state'] = self._defaults['state'](self, cr, uid, context)
else:
default['state'] = self._defaults['state']
# build a black list of fields that should not be copied
blacklist = set(MAGIC_COLUMNS + ['parent_left', 'parent_right'])
def blacklist_given_fields(obj):
# blacklist the fields that are given by inheritance
for other, field_to_other in obj._inherits.items():
blacklist.add(field_to_other)
if field_to_other in default:
# all the fields of 'other' are given by the record: default[field_to_other],
# except the ones redefined in self
blacklist.update(set(self.pool[other]._all_columns) - set(self._columns))
else:
blacklist_given_fields(self.pool[other])
# blacklist deprecated fields
for name, field in obj._columns.items():
if field.deprecated:
blacklist.add(name)
blacklist_given_fields(self)
fields_to_copy = dict((f,fi) for f, fi in self._all_columns.iteritems()
if f not in default
if f not in blacklist
if not isinstance(fi.column, fields.function))
data = self.read(cr, uid, [id], fields_to_copy.keys(), context=context)
if data:
data = data[0]
else:
            raise IndexError(_("Record #%d of %s not found, cannot copy!") % (id, self._name))
res = dict(default)
for f, colinfo in fields_to_copy.iteritems():
field = colinfo.column
if field._type == 'many2one':
res[f] = data[f] and data[f][0]
elif field._type == 'one2many':
other = self.pool[field._obj]
# duplicate following the order of the ids because we'll rely on
# it later for copying translations in copy_translation()!
lines = [other.copy_data(cr, uid, line_id, context=context) for line_id in sorted(data[f])]
# the lines are duplicated using the wrong (old) parent, but then
# are reassigned to the correct one thanks to the (0, 0, ...)
res[f] = [(0, 0, line) for line in lines if line]
elif field._type == 'many2many':
res[f] = [(6, 0, data[f])]
else:
res[f] = data[f]
return res
def copy_translations(self, cr, uid, old_id, new_id, context=None):
if context is None:
context = {}
# avoid recursion through already copied records in case of circular relationship
seen_map = context.setdefault('__copy_translations_seen',{})
if old_id in seen_map.setdefault(self._name,[]):
return
seen_map[self._name].append(old_id)
trans_obj = self.pool.get('ir.translation')
# TODO it seems fields_get can be replaced by _all_columns (no need for translation)
fields = self.fields_get(cr, uid, context=context)
for field_name, field_def in fields.items():
# removing the lang to compare untranslated values
context_wo_lang = dict(context, lang=None)
old_record, new_record = self.browse(cr, uid, [old_id, new_id], context=context_wo_lang)
# we must recursively copy the translations for o2o and o2m
if field_def['type'] == 'one2many':
target_obj = self.pool[field_def['relation']]
# here we rely on the order of the ids to match the translations
# as foreseen in copy_data()
old_children = sorted(r.id for r in old_record[field_name])
new_children = sorted(r.id for r in new_record[field_name])
for (old_child, new_child) in zip(old_children, new_children):
target_obj.copy_translations(cr, uid, old_child, new_child, context=context)
# and for translatable fields we keep them for copy
elif field_def.get('translate'):
if field_name in self._columns:
trans_name = self._name + "," + field_name
target_id = new_id
source_id = old_id
elif field_name in self._inherit_fields:
trans_name = self._inherit_fields[field_name][0] + "," + field_name
# get the id of the parent record to set the translation
inherit_field_name = self._inherit_fields[field_name][1]
target_id = new_record[inherit_field_name].id
source_id = old_record[inherit_field_name].id
else:
continue
trans_ids = trans_obj.search(cr, uid, [
('name', '=', trans_name),
('res_id', '=', source_id)
])
user_lang = context.get('lang')
for record in trans_obj.read(cr, uid, trans_ids, context=context):
del record['id']
# remove source to avoid triggering _set_src
del record['source']
record.update({'res_id': target_id})
if user_lang and user_lang == record['lang']:
# 'source' to force the call to _set_src
# 'value' needed if value is changed in copy(), want to see the new_value
record['source'] = old_record[field_name]
record['value'] = new_record[field_name]
trans_obj.create(cr, uid, record, context=context)
def copy(self, cr, uid, id, default=None, context=None):
"""
Duplicate record with given id updating it with default values
:param cr: database cursor
:param uid: current user id
:param id: id of the record to copy
        :param default: dictionary of field values to override in the original values of the copied record, e.g.: ``{'field_name': overridden_value, ...}``
:type default: dictionary
:param context: context arguments, like lang, time zone
:type context: dictionary
:return: id of the newly created record
"""
if context is None:
context = {}
context = context.copy()
data = self.copy_data(cr, uid, id, default, context)
new_id = self.create(cr, uid, data, context)
self.copy_translations(cr, uid, id, new_id, context)
return new_id
def exists(self, cr, uid, ids, context=None):
"""Checks whether the given id or ids exist in this model,
and return the list of ids that do. This is simple to use for
a truth test on a browse_record::
if record.exists():
pass
:param ids: id or list of ids to check for existence
:type ids: int or [int]
:return: the list of ids that currently exist, out of
the given `ids`
"""
if type(ids) in (int, long):
ids = [ids]
if not ids:
return []
query = 'SELECT id FROM "%s"' % self._table
        cr.execute(query + " WHERE ID IN %s", (tuple(ids),))
return [x[0] for x in cr.fetchall()]
def check_recursion(self, cr, uid, ids, context=None, parent=None):
        _logger.warning("You are using deprecated %s.check_recursion(). Please use '_check_recursion()' instead!" % \
self._name)
assert parent is None or parent in self._columns or parent in self._inherit_fields,\
"The 'parent' parameter passed to check_recursion() must be None or a valid field name"
return self._check_recursion(cr, uid, ids, context, parent)
def _check_recursion(self, cr, uid, ids, context=None, parent=None):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param parent: optional parent field name (default: ``self._parent_name = parent_id``)
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
if not parent:
parent = self._parent_name
# must ignore 'active' flag, ir.rules, etc. => direct SQL query
query = 'SELECT "%s" FROM "%s" WHERE id = %%s' % (parent, self._table)
for id in ids:
current_id = id
while current_id is not None:
cr.execute(query, (current_id,))
result = cr.fetchone()
current_id = result[0] if result else None
if current_id == id:
return False
return True
def _check_m2m_recursion(self, cr, uid, ids, field_name):
"""
Verifies that there is no loop in a hierarchical structure of records,
by following the parent relationship using the **parent** field until a loop
is detected or until a top-level record is found.
:param cr: database cursor
:param uid: current user id
:param ids: list of ids of records to check
:param field_name: field to check
:return: **True** if the operation can proceed safely, or **False** if an infinite loop is detected.
"""
field = self._all_columns.get(field_name)
field = field.column if field else None
if not field or field._type != 'many2many' or field._obj != self._name:
# field must be a many2many on itself
raise ValueError('invalid field_name: %r' % (field_name,))
query = 'SELECT distinct "%s" FROM "%s" WHERE "%s" IN %%s' % (field._id2, field._rel, field._id1)
ids_parent = ids[:]
while ids_parent:
ids_parent2 = []
for i in range(0, len(ids_parent), cr.IN_MAX):
j = i + cr.IN_MAX
sub_ids_parent = ids_parent[i:j]
cr.execute(query, (tuple(sub_ids_parent),))
ids_parent2.extend(filter(None, map(lambda x: x[0], cr.fetchall())))
ids_parent = ids_parent2
for i in ids_parent:
if i in ids:
return False
return True
def _get_external_ids(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID(s) of any database record.
**Synopsis**: ``_get_xml_ids(cr, uid, ids) -> { 'id': ['module.xml_id'] }``
:return: map of ids to the list of their fully qualified External IDs
in the form ``module.key``, or an empty list when there's no External
ID for a record, e.g.::
{ 'id': ['module.ext_id', 'module.ext_id_bis'],
'id2': [] }
"""
ir_model_data = self.pool.get('ir.model.data')
data_ids = ir_model_data.search(cr, uid, [('model', '=', self._name), ('res_id', 'in', ids)])
data_results = ir_model_data.read(cr, uid, data_ids, ['module', 'name', 'res_id'])
result = {}
for id in ids:
# can't use dict.fromkeys() as the list would be shared!
result[id] = []
for record in data_results:
result[record['res_id']].append('%(module)s.%(name)s' % record)
return result
def get_external_id(self, cr, uid, ids, *args, **kwargs):
"""Retrieve the External ID of any database record, if there
is one. This method works as a possible implementation
for a function field, to be able to add it to any
model object easily, referencing it as ``Model.get_external_id``.
When multiple External IDs exist for a record, only one
of them is returned (randomly).
:return: map of ids to their fully qualified XML ID,
defaulting to an empty string when there's none
(to be usable as a function field),
e.g.::
{ 'id': 'module.ext_id',
'id2': '' }
"""
results = self._get_xml_ids(cr, uid, ids)
for k, v in results.iteritems():
if results[k]:
results[k] = v[0]
else:
results[k] = ''
return results
# backwards compatibility
get_xml_id = get_external_id
_get_xml_ids = _get_external_ids
def print_report(self, cr, uid, ids, name, data, context=None):
"""
Render the report `name` for the given IDs. The report must be defined
for this model, not another.
"""
report = self.pool['ir.actions.report.xml']._lookup_report(cr, name)
assert self._name == report.table
return report.create(cr, uid, ids, data, context)
# Transience
def is_transient(self):
""" Return whether the model is transient.
See :class:`TransientModel`.
"""
return self._transient
def _transient_clean_rows_older_than(self, cr, seconds):
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
# Never delete rows used in last 5 minutes
seconds = max(seconds, 300)
query = ("SELECT id FROM " + self._table + " WHERE"
" COALESCE(write_date, create_date, (now() at time zone 'UTC'))::timestamp"
" < ((now() at time zone 'UTC') - interval %s)")
cr.execute(query, ("%s seconds" % seconds,))
ids = [x[0] for x in cr.fetchall()]
self.unlink(cr, SUPERUSER_ID, ids)
def _transient_clean_old_rows(self, cr, max_count):
# Check how many rows we have in the table
cr.execute("SELECT count(*) AS row_count FROM " + self._table)
res = cr.fetchall()
if res[0][0] <= max_count:
return # max not reached, nothing to do
self._transient_clean_rows_older_than(cr, 300)
def _transient_vacuum(self, cr, uid, force=False):
"""Clean the transient records.
This unlinks old records from the transient model tables whenever the
"_transient_max_count" or "_max_age" conditions (if any) are reached.
Actual cleaning will happen only once every "_transient_check_time" calls.
        This means this method can be called frequently (e.g. whenever
a new record is created).
Example with both max_hours and max_count active:
Suppose max_hours = 0.2 (e.g. 12 minutes), max_count = 20, there are 55 rows in the
table, 10 created/changed in the last 5 minutes, an additional 12 created/changed between
        5 and 10 minutes ago, the rest created/changed more than 12 minutes ago.
- age based vacuum will leave the 22 rows created/changed in the last 12 minutes
- count based vacuum will wipe out another 12 rows. Not just 2, otherwise each addition
would immediately cause the maximum to be reached again.
        - the 10 rows that have been created/changed in the last 5 minutes will NOT be deleted
"""
assert self._transient, "Model %s is not transient, it cannot be vacuumed!" % self._name
_transient_check_time = 20 # arbitrary limit on vacuum executions
self._transient_check_count += 1
if not force and (self._transient_check_count < _transient_check_time):
return True # no vacuum cleaning this time
self._transient_check_count = 0
# Age-based expiration
if self._transient_max_hours:
self._transient_clean_rows_older_than(cr, self._transient_max_hours * 60 * 60)
# Count-based expiration
if self._transient_max_count:
self._transient_clean_old_rows(cr, self._transient_max_count)
return True
def resolve_2many_commands(self, cr, uid, field_name, commands, fields=None, context=None):
""" Serializes one2many and many2many commands into record dictionaries
(as if all the records came from the database via a read()). This
method is aimed at onchange methods on one2many and many2many fields.
Because commands might be creation commands, not all record dicts
will contain an ``id`` field. Commands matching an existing record
will have an ``id``.
:param field_name: name of the one2many or many2many field matching the commands
:type field_name: str
:param commands: one2many or many2many commands to execute on ``field_name``
:type commands: list((int|False, int|False, dict|False))
:param fields: list of fields to read from the database, when applicable
:type fields: list(str)
:returns: records in a shape similar to that returned by ``read()``
(except records may be missing the ``id`` field if they don't exist in db)
:rtype: list(dict)
"""
result = [] # result (list of dict)
record_ids = [] # ids of records to read
updates = {} # {id: dict} of updates on particular records
for command in commands:
if not isinstance(command, (list, tuple)):
record_ids.append(command)
elif command[0] == 0:
result.append(command[2])
elif command[0] == 1:
record_ids.append(command[1])
updates.setdefault(command[1], {}).update(command[2])
elif command[0] in (2, 3):
record_ids = [id for id in record_ids if id != command[1]]
elif command[0] == 4:
record_ids.append(command[1])
elif command[0] == 5:
result, record_ids = [], []
elif command[0] == 6:
result, record_ids = [], list(command[2])
# read the records and apply the updates
other_model = self.pool[self._all_columns[field_name].column._obj]
for record in other_model.read(cr, uid, record_ids, fields=fields, context=context):
record.update(updates.get(record['id'], {}))
result.append(record)
return result
# for backward compatibility
resolve_o2m_commands_to_record_dicts = resolve_2many_commands
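    # Illustrative sketch (hypothetical field and record values) of the command
    # tuples resolve_2many_commands() understands, and roughly what it returns:
    #
    #   commands = [
    #       (0, 0, {'name': 'new line'}),  # create: dict is kept as-is (no id)
    #       (1, 7, {'name': 'renamed'}),   # update: record 7 is read, then overlaid
    #       (4, 8, False),                 # link: record 8 is read unchanged
    #       (2, 9, False),                 # delete: 9 is dropped from the ids to read
    #   ]
    #   self.resolve_2many_commands(cr, uid, 'line_ids', commands, fields=['name'])
    #   # -> roughly [{'name': 'new line'}, {'id': 7, 'name': 'renamed'}, {'id': 8, 'name': ...}]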
def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
"""
Performs a ``search()`` followed by a ``read()``.
:param cr: database cursor
:param user: current user id
:param domain: Search domain, see ``args`` parameter in ``search()``. Defaults to an empty domain that will match all records.
:param fields: List of fields to read, see ``fields`` parameter in ``read()``. Defaults to all fields.
:param offset: Number of records to skip, see ``offset`` parameter in ``search()``. Defaults to 0.
:param limit: Maximum number of records to return, see ``limit`` parameter in ``search()``. Defaults to no limit.
:param order: Columns to sort result, see ``order`` parameter in ``search()``. Defaults to no sort.
:param context: context arguments.
:return: List of dictionaries containing the asked fields.
:rtype: List of dictionaries.
"""
record_ids = self.search(cr, uid, domain or [], offset=offset, limit=limit, order=order, context=context)
if not record_ids:
return []
if fields and fields == ['id']:
# shortcut read if we only want the ids
return [{'id': id} for id in record_ids]
# read() ignores active_test, but it would forward it to any downstream search call
# (e.g. for x2m or function fields), and this is not the desired behavior, the flag
# was presumably only meant for the main search().
# TODO: Move this to read() directly?
read_ctx = dict(context or {})
read_ctx.pop('active_test', None)
result = self.read(cr, uid, record_ids, fields, context=read_ctx)
if len(result) <= 1:
return result
# reorder read
index = dict((r['id'], r) for r in result)
return [index[x] for x in record_ids if x in index]
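    # Illustrative call sketch (hypothetical model and field names):
    #
    #   partners = self.pool['res.partner'].search_read(
    #       cr, uid, domain=[('customer', '=', True)],
    #       fields=['name', 'email'], limit=10, order='name asc')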
def _register_hook(self, cr):
""" stuff to do right after the registry is built """
pass
def __getattr__(self, name):
if name.startswith('signal_'):
signal_name = name[len('signal_'):]
assert signal_name
return (lambda *args, **kwargs:
self.signal_workflow(*args, signal=signal_name, **kwargs))
get = getattr(super(BaseModel, self), '__getattr__', None)
if get is not None: return get(name)
raise AttributeError(
"'%s' object has no attribute '%s'" % (type(self).__name__, name))
# keep this import here, at top it will cause dependency cycle errors
import expression
class Model(BaseModel):
"""Main super-class for regular database-persisted OpenERP models.
OpenERP models are created by inheriting from this class::
class user(Model):
...
The system will later instantiate the class once per database (on
which the class' module is installed).
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False # True in a TransientModel
class TransientModel(BaseModel):
"""Model super-class for transient records, meant to be temporarily
    persisted, and regularly vacuum-cleaned.
A TransientModel has a simplified access rights management,
all users can create new records, and may only access the
records they created. The super-user has unrestricted access
to all TransientModel records.
"""
_auto = True
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = True
class AbstractModel(BaseModel):
"""Abstract Model super-class for creating an abstract class meant to be
inherited by regular models (Models or TransientModels) but not meant to
be usable on its own, or persisted.
Technical note: we don't want to make AbstractModel the super-class of
Model or BaseModel because it would not make sense to put the main
definition of persistence methods such as create() in it, and still we
should be able to override them within an AbstractModel.
"""
_auto = False # don't create any database backend for AbstractModels
_register = False # not visible in ORM registry, meant to be python-inherited only
_transient = False
def itemgetter_tuple(items):
""" Fixes itemgetter inconsistency (useful in some cases) of not returning
a tuple if len(items) == 1: always returns an n-tuple where n = len(items)
"""
if len(items) == 0:
return lambda a: ()
if len(items) == 1:
return lambda gettable: (gettable[items[0]],)
return operator.itemgetter(*items)
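# Illustrative sketch (not from the original source) of the behaviour described
# above -- 0- and 1-element selections still yield tuples:
#
#   itemgetter_tuple([])(['a', 'b'])      # -> ()
#   itemgetter_tuple([1])(['a', 'b'])     # -> ('b',)
#   itemgetter_tuple([0, 1])(['a', 'b'])  # -> ('a', 'b')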
class ImportWarning(Warning):
    """ Used to send warnings up the stack during the import process
    """
pass
def convert_pgerror_23502(model, fields, info, e):
m = re.match(r'^null value in column "(?P<field>\w+)" violates '
r'not-null constraint\n',
str(e))
field_name = m and m.group('field')
if not m or field_name not in fields:
return {'message': unicode(e)}
message = _(u"Missing required value for the field '%s'.") % field_name
field = fields.get(field_name)
if field:
message = _(u"Missing required value for the field '%s' (%s)") % (field['string'], field_name)
return {
'message': message,
'field': field_name,
}
def convert_pgerror_23505(model, fields, info, e):
m = re.match(r'^duplicate key (?P<field>\w+) violates unique constraint',
str(e))
field_name = m and m.group('field')
if not m or field_name not in fields:
return {'message': unicode(e)}
message = _(u"The value for the field '%s' already exists.") % field_name
field = fields.get(field_name)
if field:
message = _(u"%s This might be '%s' in the current model, or a field "
u"of the same name in an o2m.") % (message, field['string'])
return {
'message': message,
'field': field_name,
}
PGERROR_TO_OE = collections.defaultdict(
# shape of mapped converters
lambda: (lambda model, fvg, info, pgerror: {'message': unicode(pgerror)}), {
# not_null_violation
'23502': convert_pgerror_23502,
# unique constraint error
'23505': convert_pgerror_23505,
})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| pedrobaeza/odoo | openerp/osv/orm.py | Python | agpl-3.0 | 261,161 |
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# SLEPc - Scalable Library for Eigenvalue Problem Computations
# Copyright (c) 2002-2015, Universitat Politecnica de Valencia, Spain
#
# This file is part of SLEPc.
#
# SLEPc is free software: you can redistribute it and/or modify it under the
# terms of version 3 of the GNU Lesser General Public License as published by
# the Free Software Foundation.
#
# SLEPc is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with SLEPc. If not, see <http://www.gnu.org/licenses/>.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
import log, package
class Blzpack(package.Package):
def __init__(self,argdb,log):
self.packagename = 'blzpack'
self.downloadable = False
self.packagedir = ''
self.packagelibs = []
self.log = log
self.ProcessArgs(argdb)
def Check(self,conf,vars,cmake,petsc):
if petsc.scalar == 'complex':
self.log.Exit('ERROR: BLZPACK does not support complex numbers.')
    if (petsc.precision != 'single') and (petsc.precision != 'double'):
self.log.Exit('ERROR: BLZPACK is supported only in single or double precision.')
if petsc.ind64:
self.log.Exit('ERROR: Cannot use external packages with 64-bit indices.')
if petsc.precision == 'single':
functions = ['blzdrs']
else:
functions = ['blzdrd']
if self.packagelibs:
libs = [self.packagelibs]
else:
libs = [['-lblzpack']]
if self.packagedir:
dirs = [self.packagedir]
else:
dirs = self.GenerateGuesses('Blzpack')
self.FortranLib(conf,vars,cmake,dirs,libs,functions)
| OpenCMISS-Dependencies/slepc | config/packages/blzpack.py | Python | lgpl-3.0 | 1,966 |
from src.platform.coldfusion.interfaces import AdminInterface
class FPrint(AdminInterface):
def __init__(self):
super(FPrint, self).__init__()
self.version = "6.1"
| GHubgenius/clusterd | src/platform/coldfusion/fingerprints/CF61.py | Python | mit | 187 |
from django.template.loader import render_to_string
from tasks.const import STATUS_SUCCESS
from .base import library
@library.register('coverage')
def coverage_violation(data):
"""Coverage violation parser
:param data: task data
:type data: dict
:returns: dict
"""
data['status'] = STATUS_SUCCESS
line = data['raw'].split('\n')[-2]
statements, miss, cover = [
part for part in line.split(' ')
if len(part) > 0 and 'TOTAL' not in part
]
each_file = [
filter(len, line.split(' '))
for line in data['raw'].split('\n')[2:-3]
]
data['preview'] = render_to_string('violations/coverage/preview.html', {
'statements': statements,
'miss': miss,
'cover': cover,
})
data['prepared'] = render_to_string('violations/coverage/prepared.html', {
'statements': statements,
'miss': miss,
'cover': cover,
'each_file': each_file,
})
data['plot'] = {
'cover': int(cover[:-1]),
}
data['success_percent'] = int(cover[:-1])
return data
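# Illustrative sketch (hypothetical file names and numbers) of the data['raw']
# layout this parser assumes: a plain ``coverage report`` dump ending with a
# newline, so the second-to-last line is the TOTAL row and the per-file rows
# sit between the header and the closing separator:
#
#   Name       Stmts   Miss  Cover
#   ------------------------------
#   pkg/a.py      10      2    80%
#   pkg/b.py      30      0   100%
#   ------------------------------
#   TOTAL         40      2    95%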
| nvbn/coviolations_web | violations/coverage.py | Python | mit | 1,087 |
#!/usr/bin/env python
from pydimitri import *
from time import time
from math import pi
d = Dimitri()
t = time()
while (True):
d.receiveCurrAngles()
for joint in d.joints:
if joint:
print str(joint.servo_id) + (':%3.0f' % (joint.currAngle*180/pi)),
print time() - t
t = time()
| TauraBots/PyDimitri | show_values.py | Python | mit | 304 |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Generic logging, the way I remember it from scripts gone by.
TODO:
- network logging support.
- log rotation config
"""
from datetime import datetime
import logging
import os
import sys
import traceback
# Define our own FATAL_LEVEL
FATAL_LEVEL = logging.CRITICAL + 10
logging.addLevelName(FATAL_LEVEL, 'FATAL')
# mozharness log levels.
DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL, IGNORE = (
'debug', 'info', 'warning', 'error', 'critical', 'fatal', 'ignore')
# LogMixin {{{1
class LogMixin(object):
"""This is a mixin for any object to access similar logging
functionality -- more so, of course, for those objects with
    self.config and self.log_obj.
"""
def _log_level_at_least(self, level):
log_level = INFO
levels = [DEBUG, INFO, WARNING, ERROR, CRITICAL, FATAL]
if hasattr(self, 'config'):
log_level = self.config.get('log_level', INFO)
return levels.index(level) >= levels.index(log_level)
def _print(self, message, stderr=False):
if not hasattr(self, 'config') or self.config.get('log_to_console', True):
if stderr:
print >> sys.stderr, message
else:
print message
def log(self, message, level=INFO, exit_code=-1):
if self.log_obj:
return self.log_obj.log_message(
message, level=level,
exit_code=exit_code,
post_fatal_callback=self._post_fatal,
)
if level == INFO:
if self._log_level_at_least(level):
self._print(message)
elif level == DEBUG:
if self._log_level_at_least(level):
self._print('DEBUG: %s' % message)
elif level in (WARNING, ERROR, CRITICAL):
if self._log_level_at_least(level):
self._print("%s: %s" % (level.upper(), message), stderr=True)
elif level == FATAL:
if self._log_level_at_least(level):
self._print("FATAL: %s" % message, stderr=True)
raise SystemExit(exit_code)
def worst_level(self, target_level, existing_level, levels=None):
"""returns either existing_level or target level.
This depends on which is closest to levels[0]
By default, levels is the list of log levels"""
if not levels:
levels = [FATAL, CRITICAL, ERROR, WARNING, INFO, DEBUG, IGNORE]
if target_level not in levels:
self.fatal("'%s' not in %s'." % (target_level, levels))
for l in levels:
if l in (target_level, existing_level):
return l
# Copying Bear's dumpException():
# https://hg.mozilla.org/build/tools/annotate/1485f23c38e0/sut_tools/sut_lib.py#l23
def exception(self, message=None, level=ERROR):
tb_type, tb_value, tb_traceback = sys.exc_info()
if message is None:
message = ""
else:
message = "%s\n" % message
for s in traceback.format_exception(tb_type, tb_value, tb_traceback):
message += "%s\n" % s
# Log at the end, as a fatal will attempt to exit after the 1st line.
self.log(message, level=level)
def debug(self, message):
self.log(message, level=DEBUG)
def info(self, message):
self.log(message, level=INFO)
def warning(self, message):
self.log(message, level=WARNING)
def error(self, message):
self.log(message, level=ERROR)
def critical(self, message):
self.log(message, level=CRITICAL)
def fatal(self, message, exit_code=-1):
self.log(message, level=FATAL, exit_code=exit_code)
def _post_fatal(self, message=None, exit_code=None):
""" Sometimes you want to create a report or cleanup
or notify on fatal(); override this method to do so.
Please don't use this for anything significantly long-running.
"""
pass
# OutputParser {{{1
class OutputParser(LogMixin):
""" Helper object to parse command output.
This will buffer output if needed, so we can go back and mark
[(linenum - 10):linenum+10] as errors if need be, without having to
get all the output first.
linenum+10 will be easy; we can set self.num_post_context_lines to 10,
and self.num_post_context_lines-- as we mark each line to at least error
level X.
linenum-10 will be trickier. We'll not only need to save the line
itself, but also the level that we've set for that line previously,
whether by matching on that line, or by a previous line's context.
We should only log that line if all output has ended (self.finish() ?);
otherwise store a list of dictionaries in self.context_buffer that is
buffered up to self.num_pre_context_lines (set to the largest
pre-context-line setting in error_list.)
"""
def __init__(self, config=None, log_obj=None, error_list=None, log_output=True):
self.config = config
self.log_obj = log_obj
self.error_list = error_list or []
self.log_output = log_output
self.num_errors = 0
self.num_warnings = 0
# TODO context_lines.
# Not in use yet, but will be based off error_list.
self.context_buffer = []
self.num_pre_context_lines = 0
self.num_post_context_lines = 0
self.worst_log_level = INFO
def parse_single_line(self, line):
for error_check in self.error_list:
# TODO buffer for context_lines.
match = False
if 'substr' in error_check:
if error_check['substr'] in line:
match = True
elif 'regex' in error_check:
if error_check['regex'].search(line):
match = True
else:
self.warning("error_list: 'substr' and 'regex' not in %s" %
error_check)
if match:
log_level = error_check.get('level', INFO)
if self.log_output:
message = ' %s' % line
if error_check.get('explanation'):
message += '\n %s' % error_check['explanation']
if error_check.get('summary'):
self.add_summary(message, level=log_level)
else:
self.log(message, level=log_level)
if log_level in (ERROR, CRITICAL, FATAL):
self.num_errors += 1
if log_level == WARNING:
self.num_warnings += 1
self.worst_log_level = self.worst_level(log_level,
self.worst_log_level)
break
else:
if self.log_output:
self.info(' %s' % line)
def add_lines(self, output):
if isinstance(output, basestring):
output = [output]
for line in output:
if not line or line.isspace():
continue
line = line.decode("utf-8", 'replace').rstrip()
self.parse_single_line(line)
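# Illustrative usage sketch (not part of the original module; the command
# output variable is made up). Each error_list entry carries a 'substr' or a
# pre-compiled 'regex', plus optional 'level' and 'explanation' keys:
#
#   error_list = [
#       {'substr': 'Traceback (most recent call last)', 'level': ERROR},
#       {'substr': 'command not found', 'level': WARNING,
#        'explanation': 'a required tool is missing from PATH'},
#   ]
#   parser = OutputParser(config={'log_level': INFO}, error_list=error_list)
#   parser.add_lines(command_output)
#   print parser.num_errors, parser.num_warnings, parser.worst_log_level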
# BaseLogger {{{1
class BaseLogger(object):
"""Create a base logging class.
TODO: status? There may be a status object or status capability in
either logging or config that allows you to count the number of
error,critical,fatal messages for us to count up at the end (aiming
for 0).
"""
LEVELS = {
DEBUG: logging.DEBUG,
INFO: logging.INFO,
WARNING: logging.WARNING,
ERROR: logging.ERROR,
CRITICAL: logging.CRITICAL,
FATAL: FATAL_LEVEL
}
def __init__(
self, log_level=INFO,
log_format='%(message)s',
log_date_format='%H:%M:%S',
log_name='test',
log_to_console=True,
log_dir='.',
log_to_raw=False,
logger_name='',
append_to_log=False,
):
self.log_format = log_format
self.log_date_format = log_date_format
self.log_to_console = log_to_console
self.log_to_raw = log_to_raw
self.log_level = log_level
self.log_name = log_name
self.log_dir = log_dir
self.append_to_log = append_to_log
# Not sure what I'm going to use this for; useless unless we
# can have multiple logging objects that don't trample each other
self.logger_name = logger_name
self.all_handlers = []
self.log_files = {}
self.create_log_dir()
def create_log_dir(self):
if os.path.exists(self.log_dir):
if not os.path.isdir(self.log_dir):
os.remove(self.log_dir)
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.abs_log_dir = os.path.abspath(self.log_dir)
def init_message(self, name=None):
if not name:
name = self.__class__.__name__
self.log_message("%s online at %s in %s" %
(name, datetime.now().strftime("%Y%m%d %H:%M:%S"),
os.getcwd()))
def get_logger_level(self, level=None):
if not level:
level = self.log_level
return self.LEVELS.get(level, logging.NOTSET)
def get_log_formatter(self, log_format=None, date_format=None):
if not log_format:
log_format = self.log_format
if not date_format:
date_format = self.log_date_format
return logging.Formatter(log_format, date_format)
def new_logger(self, logger_name):
"""Create a new logger.
By default there are no handlers.
"""
self.logger = logging.getLogger(logger_name)
self.logger.setLevel(self.get_logger_level())
self._clear_handlers()
if self.log_to_console:
self.add_console_handler()
if self.log_to_raw:
self.log_files['raw'] = '%s_raw.log' % self.log_name
self.add_file_handler(os.path.join(self.abs_log_dir,
self.log_files['raw']),
log_format='%(message)s')
def _clear_handlers(self):
"""To prevent dups -- logging will preserve Handlers across
objects :(
"""
attrs = dir(self)
if 'all_handlers' in attrs and 'logger' in attrs:
for handler in self.all_handlers:
self.logger.removeHandler(handler)
self.all_handlers = []
def __del__(self):
logging.shutdown()
self._clear_handlers()
def add_console_handler(self, log_level=None, log_format=None,
date_format=None):
console_handler = logging.StreamHandler()
console_handler.setLevel(self.get_logger_level(log_level))
console_handler.setFormatter(self.get_log_formatter(log_format=log_format,
date_format=date_format))
self.logger.addHandler(console_handler)
self.all_handlers.append(console_handler)
def add_file_handler(self, log_path, log_level=None, log_format=None,
date_format=None):
if not self.append_to_log and os.path.exists(log_path):
os.remove(log_path)
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(self.get_logger_level(log_level))
file_handler.setFormatter(self.get_log_formatter(log_format=log_format,
date_format=date_format))
self.logger.addHandler(file_handler)
self.all_handlers.append(file_handler)
def log_message(self, message, level=INFO, exit_code=-1, post_fatal_callback=None):
"""Generic log method.
There should be more options here -- do or don't split by line,
use os.linesep instead of assuming \n, be able to pass in log level
by name or number.
Adding the IGNORE special level for runCommand.
"""
if level == IGNORE:
return
for line in message.splitlines():
self.logger.log(self.get_logger_level(level), line)
if level == FATAL:
if callable(post_fatal_callback):
self.logger.log(FATAL_LEVEL, "Running post_fatal callback...")
post_fatal_callback(message=message, exit_code=exit_code)
self.logger.log(FATAL_LEVEL, 'Exiting %d' % exit_code)
raise SystemExit(exit_code)
# SimpleFileLogger {{{1
class SimpleFileLogger(BaseLogger):
"""Create one logFile. Possibly also output to
the terminal and a raw log (no prepending of level or date)
"""
def __init__(self,
log_format='%(asctime)s %(levelname)8s - %(message)s',
logger_name='Simple', log_dir='logs', **kwargs):
BaseLogger.__init__(self, logger_name=logger_name, log_format=log_format,
log_dir=log_dir, **kwargs)
self.new_logger(self.logger_name)
self.init_message()
def new_logger(self, logger_name):
BaseLogger.new_logger(self, logger_name)
self.log_path = os.path.join(self.abs_log_dir, '%s.log' % self.log_name)
self.log_files['default'] = self.log_path
self.add_file_handler(self.log_path)
# MultiFileLogger {{{1
class MultiFileLogger(BaseLogger):
"""Create a log per log level in log_dir. Possibly also output to
the terminal and a raw log (no prepending of level or date)
"""
def __init__(self, logger_name='Multi',
log_format='%(asctime)s %(levelname)8s - %(message)s',
log_dir='logs', log_to_raw=True, **kwargs):
BaseLogger.__init__(self, logger_name=logger_name,
log_format=log_format,
log_to_raw=log_to_raw, log_dir=log_dir,
**kwargs)
self.new_logger(self.logger_name)
self.init_message()
def new_logger(self, logger_name):
BaseLogger.new_logger(self, logger_name)
min_logger_level = self.get_logger_level(self.log_level)
for level in self.LEVELS.keys():
if self.get_logger_level(level) >= min_logger_level:
self.log_files[level] = '%s_%s.log' % (self.log_name,
level)
self.add_file_handler(os.path.join(self.abs_log_dir,
self.log_files[level]),
log_level=level)
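# Illustrative usage sketch (not from the original module; log name and
# messages are made up):
#
#   log_obj = MultiFileLogger(log_name='build', log_dir='logs', log_level=DEBUG)
#   log_obj.log_message('checking out sources')             # INFO by default
#   log_obj.log_message('disk almost full', level=WARNING)  # also copied to the warning log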
# __main__ {{{1
if __name__ == '__main__':
pass
| mrrrgn/build-mozharness | mozharness/base/log.py | Python | mpl-2.0 | 14,897 |
"""OAuth 2.0 WSGI server middleware providing MyProxy certificates as access tokens
"""
__author__ = "W V Engen"
__date__ = "13/11/12"
__copyright__ = "(C) 2012 FOM / Nikhef"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = "$Id$"
import logging
from ConfigParser import SafeConfigParser
log = logging.getLogger(__name__)
class ResourceRegistration(object):
"""
An entry in the resource register.
"""
def __init__(self, name, resource_id,
resource_secret, authentication_data):
self.name = name
self.id = resource_id
self.secret = resource_secret
self.authentication_data = authentication_data
class ResourceRegister(object):
"""
Resource reqister read from a configuration file
"""
register = {}
def __init__(self, config_file=None):
if config_file:
config = SafeConfigParser()
config.read(config_file)
resource_keys = config.get('resource_register', 'resources').strip()
if resource_keys:
for resource_key in [k.strip() for k in resource_keys.split(',')]:
self._create_resource(config, resource_key, 'resource')
def _create_resource(self, config, resource_key, prefix):
resource_section_name = prefix + ':' + resource_key
resource_id = config.get(resource_section_name, 'id')
resource_secret = None
if config.has_option(resource_section_name, 'secret'):
resource_secret = config.get(resource_section_name, 'secret')
resource_authentication_data = None
if config.has_option(resource_section_name, 'authentication_data'):
resource_authentication_data = config.get(resource_section_name, 'authentication_data')
resource_registration = ResourceRegistration(
config.get(resource_section_name, 'name'),
resource_id,
resource_secret,
resource_authentication_data)
self.register[resource_id] = resource_registration
    def is_registered_resource(self, resource_id):
        """Return an error message if the resource ID is not in the resource
        register, or None if it is registered.
        """
if resource_id not in self.register:
return ('Resource of id "%s" is not registered.' % resource_id)
return None
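# Illustrative sketch of the expected configuration layout (hypothetical
# values): [resource_register] lists comma-separated resource keys, and each
# key gets a [resource:<key>] section providing an id, a name and the optional
# secret / authentication_data options read above.
#
#   [resource_register]
#   resources = myresource
#
#   [resource:myresource]
#   id = my-resource-id
#   name = My Resource
#   secret = not-a-real-secret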
| TheLanguageArchive/ndg_oauth | ndg_oauth_server/ndg/oauth/server/lib/register/resource.py | Python | bsd-3-clause | 2,373 |
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
import numpy as np
from tmc import points
from tmc.utils import load, get_out, patch_helper
module_name="src.subsetting_with_loc"
subsetting_with_loc = load(module_name, "subsetting_with_loc")
ph = patch_helper(module_name)
@points('p04-07.1')
class SubsettingWithLoc(unittest.TestCase):
def test_shape(self):
df = subsetting_with_loc()
self.assertEqual(df.shape, (311,3), msg="Incorrect shape!")
def test_columns_and_indices(self):
df = subsetting_with_loc()
np.testing.assert_array_equal(df.columns, ["Population",
"Share of Swedish-speakers of the population, %",
"Share of foreign citizens of the population, %"],
err_msg="Incorrect column names!")
self.assertEqual(df.index[0], "Akaa", msg="Incorrect first index!")
self.assertEqual(df.index[-1], "Äänekoski", msg="Incorrect last index!")
if __name__ == '__main__':
unittest.main()
| mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part04-e07_subsetting_with_loc/test/test_subsetting_with_loc.py | Python | gpl-3.0 | 1,128 |
from collections import OrderedDict
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
os.environ['is_test_suite'] = 'True'
import classifiers as classifier_tests
import regressors as regressor_tests
training_parameters = {
'model_names': ['GradientBoosting', 'XGB', 'DeepLearning', 'LGBM']
}
# Make this an OrderedDict so that we run the tests in a consistent order
test_names = OrderedDict([
('optimize_final_model_classification', classifier_tests.optimize_final_model_classification),
('getting_single_predictions_classification', classifier_tests.getting_single_predictions_classification),
('feature_learning_getting_single_predictions_classification', classifier_tests.feature_learning_getting_single_predictions_classification),
('getting_single_predictions_multilabel_classification', classifier_tests.getting_single_predictions_multilabel_classification),
('categorical_ensembling_classification', classifier_tests.categorical_ensembling_classification),
('feature_learning_categorical_ensembling_getting_single_predictions_classification', classifier_tests.feature_learning_categorical_ensembling_getting_single_predictions_classification)
# ('optimize_final_model_regression', regressor_tests.optimize_final_model_regression),
# ('getting_single_predictions_regression', regressor_tests.getting_single_predictions_regression),
# ('feature_learning_getting_single_predictions_regression', regressor_tests.feature_learning_getting_single_predictions_regression),
# ('categorical_ensembling_regression', regressor_tests.categorical_ensembling_regression),
# ('feature_learning_categorical_ensembling_getting_single_predictions_regression', regressor_tests.feature_learning_categorical_ensembling_getting_single_predictions_regression)
])
def test_generator():
for model_name in training_parameters['model_names']:
for test_name, test in test_names.items():
test_model_name = model_name
if '_classification' in test_name and model_name is not None:
test_model_name = model_name + 'Classifier'
elif '_regression' in test_name and model_name is not None:
test_model_name = model_name + 'Regressor'
test.description = str(test_model_name) + '_' + test_name
yield test, test_model_name
| doordash/auto_ml | tests/automated_tests.py | Python | mit | 2,388 |
import gridfs
import pymongo
__author__ = 'abilgin'
class Database(object):
def __init__(self):
self.URI = "mongodb://host:port"
self.client = pymongo.MongoClient(self.URI)
self.db = self.client["dbname"]
self.fs = gridfs.GridFS(self.db)
def insert(self, collection, data):
self.db[collection].insert(data)
def find(self, collection, query, projection=None):
return self.db[collection].find(query, projection)
def find_one(self, collection, query, projection=None):
return self.db[collection].find_one(query, projection)
def update(self, collection, query, data):
self.db[collection].update(query, data, upsert=True)
def remove(self, collection, query):
self.db[collection].remove(query)
def iter_collection(self, collection, key={}):
"""Creates a cursor to iterate over and returns it
a key can be given to limit the results from the find command
"""
cursor = self.db[collection].find(key, no_cursor_timeout=True)
for item in cursor:
yield item
cursor.close()
def getGridFS(self):
return self.fs | aysenurbilgin/tilt_api | src/common/database.py | Python | apache-2.0 | 1,178 |
import csv
import argparse
import os.path
import schema
import itertools
from collections import defaultdict
from simplify_path import simplify_path
import sqlite3
class MbtaHeavyRail:
def __init__(self):
self.stop_ids = set()
def write_sql(self, cur, startorder, route_ids, as_route, gtfs_map):
route_rows = [list(gtfs_map.find_routes_by_id(route_id))[0] for route_id in route_ids]
route_color = [route_row["route_color"] for route_row in route_rows][0]
shape_rows = itertools.chain.from_iterable((gtfs_map.find_sorted_shapes_by_route(item) for item in route_ids))
# this stores a list of list of lat, lon pairs
print("Appending paths for %s" % as_route)
paths = []
shape_rows = list(sorted(shape_rows, key=lambda shape: shape["shape_id"]))
print("Adding shapes...")
# todo: sorted?
for shape_id, group_rows in itertools.groupby(shape_rows, lambda shape: shape["shape_id"]):
path = [(float(row["shape_pt_lat"]), float(row["shape_pt_lon"])) for row in group_rows]
path = simplify_path(path)
paths.append(path)
stop_rows = itertools.chain.from_iterable(gtfs_map.find_stops_by_route(route) for route in route_ids)
pathblob = schema.Box(paths).get_blob_string()
print("Inserting route information for %s" % as_route)
# insert route information
obj = schema.getSchemaAsObject()
obj.routes.route.value = as_route
obj.routes.routetitle.value = as_route
obj.routes.color.value = int("0x%s" % route_color, 0)
obj.routes.oppositecolor.value = int("0x%s" % route_color, 0)
obj.routes.listorder.value = startorder
obj.routes.agencyid.value = schema.SubwayAgencyId
obj.routes.pathblob.value = pathblob
cur.execute(obj.routes.insert())
print("Adding stops...")
for stop_row in stop_rows:
stop_id = stop_row["stop_id"]
if stop_id not in self.stop_ids:
obj.stops.tag.value = stop_row["stop_id"]
obj.stops.title.value = stop_row["stop_name"]
obj.stops.lat.value = float(stop_row["stop_lat"])
obj.stops.lon.value = float(stop_row["stop_lon"])
obj.stops.parent.value = stop_row["parent_station"]
cur.execute(obj.stops.insert())
obj.stopmapping.route.value = as_route
obj.stopmapping.tag.value = stop_row["stop_id"]
cur.execute(obj.stopmapping.insert())
self.stop_ids.add(stop_id)
for route_id in route_ids:
print("Adding directions for {}...".format(route_id))
for trip_row in gtfs_map.find_trips_by_route(route_id):
obj.directions.dirTag.value = trip_row["trip_id"]
obj.directions.dirTitleKey.value = trip_row["trip_headsign"]
obj.directions.dirRouteKey.value = as_route
obj.directions.dirNameKey.value = ""
obj.directions.useAsUI.value = 1
cur.execute(obj.directions.insert())
print("Done for %s" % as_route)
return (1)
def generate(self, conn, startorder, gtfs_map):
cur = conn.cursor()
startorder += self.write_sql(cur, startorder, ["Red"], "Red", gtfs_map)
startorder += self.write_sql(cur, startorder, ["Orange"], "Orange", gtfs_map)
startorder += self.write_sql(cur, startorder, ["Blue"], "Blue", gtfs_map)
startorder += self.write_sql(cur, startorder, ["Green-B", "Green-C", "Green-D", "Green-E"], "Green", gtfs_map)
startorder += self.write_sql(cur, startorder, ["Mattapan"], "Mattapan", gtfs_map)
startorder += self.write_sql(cur, startorder, ["712"], "712", gtfs_map)
startorder += self.write_sql(cur, startorder, ["713"], "713", gtfs_map)
conn.commit()
cur.close()
return startorder
| noisecapella/bostonbusmap | tools/generate/mbta_heavy_rail.py | Python | gpl-3.0 | 3,999 |
from . import tree
class Node:
def __init__(self, container, parent, value=None):
self._node = tree.Node(container, parent, value)
@property
def _container(self):
return self._node._container
@property
def id(self):
return self._node.id
@property
def value(self):
return self._node.value
@value.setter
def value(self, value):
self._node.value = value
def _create_children(self):
left = Node(self._container, self)
right = Node(self._container, self)
self._children = [left, right]
return self._children
def _create_child(self, index, value):
try:
children = self._children
except AttributeError:
children = self._create_children()
children[index].value = value
return children[index]
def create_left_child(self, value):
return self._create_child(0, value)
def create_right_child(self, value):
return self._create_child(1, value)
def _child(self, index):
try:
child = self._children[index]
except AttributeError:
return None
if child.value is None:
return None
return child
@property
def left_child(self):
return self._child(0)
@property
def right_child(self):
return self._child(1)
class BinaryTree:
def __init__(self, *args, **kwargs):
self._tree = tree.Tree(*args, **kwargs)
@property
def root(self):
try:
return self._root
except AttributeError:
return None
def create_root(self, value):
if self.root:
            raise RuntimeError("Cannot set root more than once")
self._root = Node(self, None, value)
return self._root
@property
def _pipe(self):
return self._tree._pipe
def sync(self):
self._tree.sync()
@classmethod
def name(cls):
return tree.Tree.__name__
| alviproject/alvi | alvi/client/containers/binary_tree.py | Python | mit | 2,017 |
"""The component for STIEBEL ELTRON heat pumps with ISGWeb Modbus module."""
from datetime import timedelta
import logging
from pystiebeleltron import pystiebeleltron
import voluptuous as vol
from homeassistant.components.modbus import (
CONF_HUB,
DEFAULT_HUB,
DOMAIN as MODBUS_DOMAIN,
)
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
DOMAIN = "stiebel_eltron"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): cv.string,
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
def setup(hass, config):
"""Set up the STIEBEL ELTRON unit.
Will automatically load climate platform.
"""
name = config[DOMAIN][CONF_NAME]
modbus_client = hass.data[MODBUS_DOMAIN][config[DOMAIN][CONF_HUB]]
hass.data[DOMAIN] = {
"name": name,
"ste_data": StiebelEltronData(name, modbus_client),
}
discovery.load_platform(hass, "climate", DOMAIN, {}, config)
return True
class StiebelEltronData:
"""Get the latest data and update the states."""
def __init__(self, name, modbus_client):
"""Init the STIEBEL ELTRON data object."""
self.api = pystiebeleltron.StiebelEltronAPI(modbus_client, 1)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update unit data."""
if not self.api.update():
_LOGGER.warning("Modbus read failed")
else:
_LOGGER.debug("Data updated successfully")
| leppa/home-assistant | homeassistant/components/stiebel_eltron/__init__.py | Python | apache-2.0 | 1,830 |
import sys
import urllib
import simplejson
import csv
reload(sys)
sys.setdefaultencoding('utf8')
googleGeocodeUrl = 'http://maps.googleapis.com/maps/api/geocode/json?'
csv_file = 'unhcr_popstats_export_demographics_all_data.csv'
def get_coordinates(query, from_sensor=False):
query = query.encode('utf-8')
if query == 'Central and Southern Serbia':
return [44.0, 21.0]
if query == 'Kyrgyzstan':
return [42.8667, 74.6000]
params = {
'address': query,
'sensor': "true" if from_sensor else "false"
}
url = googleGeocodeUrl + urllib.urlencode(params)
json_response = None
while json_response == None:
try:
json_response = urllib.urlopen(url)
except:
pass
response = simplejson.loads(json_response.read())
if response['results']:
# print response
location = response['results'][0]['geometry']['location']
form_address = response['results'][0]['formatted_address']
latitude, longitude = location['lat'], location['lng']
print "Found", len(response['results']), "for:\n", query, "\nFormatted Address::\n", form_address, "\nHighest Accuracy:\n", latitude, longitude
else:
#latitude, longitude = None, None
print query, "<no results>"
return None
return [latitude, longitude]
#def swap_encoding():
# reload(sys)
# if encoding == 'ascii':
# sys.setdefaultencoding('utf8')
# encoding = 'utf8'
#
# elif encoding == 'utf8':
# sys.setdefaultencoding('ascii')
# encoding = 'ascii'
def build_table():
queries_seen = {}
# open csv file
with open(csv_file, 'rb') as csvfile:
with open('OUT_' + csv_file, 'w+') as csvfileW:
reader = csv.DictReader(csvfile)
writer = csv.DictWriter(csvfileW, ['Latitude', 'Longitude'])
for row in reader:
print "\n"
country = row['Country'] #.trim()
place = row['Location'] #.trim()
query = str(str(place) + ', ' + str(country))
print(query)
if not query in queries_seen:
coords = None
while coords == None or len(coords) == 0: # If country returns none, we have a connection Problem :S
coords = get_coordinates(query)
if coords == None:
if place not in ['Central', 'West', 'East', 'North', 'South', 'Middle', 'Mid']:
coords = get_coordinates(place)
if coords == None:
coords = get_coordinates(country)
if coords == None:
coords = get_coordinates(country.split(' ')[0])
else:
coords = get_coordinates(country)
if coords == None:
coords = get_coordinates(country.split(' ')[0])
queries_seen[query] = coords
else:
coords = queries_seen[query]
if len(coords) == 2:
print(coords)
writer.writerow({'Latitude': coords[0], 'Longitude': coords[1]})
else:
print "ERROR\n"
writer.writerow({'Latitude': '', 'Longitude': ''})
if __name__=='__main__':
build_table()
| lepszy/nostradamIQ | tools/build_locationsrefugees.py | Python | gpl-3.0 | 3,687 |
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="pie.title.font", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/pie/title/font/_family.py | Python | mit | 595 |
"""bokepdo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from dashboard_admin import views as site
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
url(r'^admin/', admin.site.urls),
# url(r'^$', site.index, name='index'),
url(r'^$', site.index_new, name='index_new'),
url(r'^login/$', site.login_user, name='login_user'),
    url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout', kwargs={'next_page': '/'}),
url(r'^gallery/image=(?P<post_id>[^/]+).html$', site.detail_new, name='detail_new'),
url(r'^gallery/image=(?P<post_id>[^/]+).html/rename$', site.rename_data, name='rename_data'),
url(r'^gallery/image=(?P<post_id>[^/]+).html/zip$', site.zip_post, name='zip_post'),
# url(r'^detail/(?P<post_id>[0-9]+)/$', site.detail, name='detail'),
url(r'^scrap/$', site.image_scrapper, name='image_scrapper'),
url(r'^output/$', site.list_image, name='list_image'),
url(r'^export/$', site.export_image, name='export_image'),
url(r'^zip/$', site.zip_file, name='zip_file'),
url(r'^generate_uuid/$', site.generate_uuid, name='generate_uuid'),
url(r'^post/$', site.export_to_post, name='export_to_post'),
url(r'^delete/(?P<x>[^/]+)$', site.delete_one, name='delete_one'),
url(r'^category/search=(?P<name>[^/]+).html$', site.category_post, name='category_post'),
url(r'^populer.html$', site.populer_post, name='populer_post'),
url(r'^search.html$', site.search_post, name='search_post'),
url(r'^list/search=(?P<name>[^/]+).html$', site.model_list, name='model_list'),
url(r'^addmodel.html$', site.add_modelname, name='add_modelname'),
]
if settings.DEBUG:
urlpatterns.append(url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}))
urlpatterns.append(url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.STATIC_ROOT,
}))
| efenfauzi/tiptopgallery | bokepdo/urls.py | Python | gpl-3.0 | 2,623 |
import logging
import re
from typing import Union, Dict
from datetime import timedelta
from discord.ext.commands.converter import Converter
from redbot.core import commands
log = logging.getLogger("red.cogs.mutes")
# the following regex is slightly modified from Red
# it's changed to be slightly more strict on matching with finditer
# this is to prevent "empty" matches when parsing the full reason
# This is also designed to allow the time interval at the beginning or the end of the mute
# to account for those times when you think of adding the time *after* already typing out the reason
# https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/core/commands/converter.py#L55
TIME_RE_STRING = r"|".join(
[
r"((?P<weeks>\d+?)\s?(weeks?|w))",
r"((?P<days>\d+?)\s?(days?|d))",
r"((?P<hours>\d+?)\s?(hours?|hrs|hr?))",
r"((?P<minutes>\d+?)\s?(minutes?|mins?|m(?!o)))", # prevent matching "months"
r"((?P<seconds>\d+?)\s?(seconds?|secs?|s))",
]
)
TIME_RE = re.compile(TIME_RE_STRING, re.I)
TIME_SPLIT = re.compile(r"t(?:ime)?=")
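# Illustration (an assumption for readability, not part of the original cog):
# for an argument like "10 minutes spamming", TIME_RE captures "10 minutes"
# (minutes=10) and the remainder becomes the reason; TIME_SPLIT additionally lets
# the interval be marked explicitly with a "t="/"time=" prefix, e.g. "... time=2h".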
class MuteTime(Converter):
"""
    Parse an optional time interval out of the argument of a mute command and
    return the parsed duration together with the remaining text as the reason
"""
async def convert(
self, ctx: commands.Context, argument: str
) -> Dict[str, Union[timedelta, str, None]]:
time_split = TIME_SPLIT.split(argument)
result: Dict[str, Union[timedelta, str, None]] = {}
if time_split:
maybe_time = time_split[-1]
else:
maybe_time = argument
time_data = {}
for time in TIME_RE.finditer(maybe_time):
argument = argument.replace(time[0], "")
for k, v in time.groupdict().items():
if v:
time_data[k] = int(v)
if time_data:
result["duration"] = timedelta(**time_data)
result["reason"] = argument.strip()
return result
| palmtree5/Red-DiscordBot | redbot/cogs/mutes/converters.py | Python | gpl-3.0 | 1,983 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-11 08:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schedule', '0021_auto_20160302_1726'),
]
operations = [
migrations.AddField(
model_name='instrumentappointment',
name='times',
field=models.FloatField(default=0.5, help_text='\u9884\u4f30\u57f9\u8bad\u4f7f\u7528\u65f6\u95f4', verbose_name='\u7528\u65f6/\u5c0f\u65f6'),
),
migrations.AlterField(
model_name='instrumentappointment',
name='instrument',
field=models.ManyToManyField(help_text='\u901a\u8fc7\u6309\u4f4fCtrl\u6765\u5b9e\u73b0\u591a\u9009\uff01', to='schedule.Instrument', verbose_name='\u4eea\u5668'),
),
]
| fenglb/mysite | schedule/migrations/0022_auto_20160311_1603.py | Python | cc0-1.0 | 860 |
def save_list(input_list, file_name='temp.txt'):
"""A list (input_list) is saved to a file (file_name)"""
with open(file_name, 'w') as fh:
print(*input_list, sep='\n', file=fh)
return None
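# For example (illustrative only): save_list(['ACT', 'GGA'], 'seqs.txt') writes
# each list item on its own line of seqs.txt.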
| Serulab/Py4Bio | code/ch6/list2text2.py | Python | mit | 209 |
#!/usr/bin/python
# PBR Generated from u'console_scripts'
import sys
from nova.cmd.compute import main
if __name__ == "__main__":
    sys.exit(main())
 | silenceli/nova | nova/tests/test_compute.py | Python | apache-2.0 | 154 |
"""Imports:
sys: for console output
"""
import sys
def state(description):
"""Prints a description with no progress"""
sys.stdout.write("%s...\n" % (description))
def skip(description):
"""Prints a description as having been skipped"""
sys.stdout.write("%s... SKIPPED\n" % (description))
def report(description, total_units, done_units):
"""Prints a description with its total progress"""
if done_units == total_units:
sys.stdout.write("%s... 100%%\n" % (description))
#sys.stdout.write("%s... %4d/%4d\n" % (description, done_units, total_units))
else:
percentage = (done_units / float(total_units)) * 100
sys.stdout.write("%s... %3.0f%%\r" % (description, percentage))
#sys.stdout.write("%s... %4d/%4d\r" % (description, done_units, total_units))
sys.stdout.flush()
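# Illustrative output (an assumption, derived from the format strings above):
#   report("Copying files", 200, 50)  -> "Copying files...  25%\r"  (rewrites the same line)
#   report("Copying files", 200, 200) -> "Copying files... 100%\n"  (completes the line)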
| jonsim/music_tagger | Progress.py | Python | gpl-3.0 | 850 |
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$',
'django.contrib.auth.views.login',
{'template_name':'login.html'},
name='login'),
url(r'^accounts/logout/$',
'django.contrib.auth.views.logout', name='logout'),
url(r'^', include('banana.urls')),
) + \
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
handler500 = 'banana.views.etc.banana_500'
| bartscheers/banana | project/urls.py | Python | bsd-3-clause | 819 |
import urllib
import json
import datetime
import time
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.httpclient
from oauth import oauth # for twitter
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class IndexHandler(tornado.web.RequestHandler):
# decorator is used to leave the connection open till the callback is
# executed.
@tornado.web.asynchronous
def get(self):
query = self.get_argument('q')
        # here AsyncHTTPClient is used to handle requests asynchronously
        # and a callback function is needed which will be called when the
        # response is returned.
        # in this case, when twitter returns the response, the callback
        # is called.
        # compared to earlier, the code is now split into two functions to
        # handle the requests.
client = tornado.httpclient.AsyncHTTPClient()
url = "https://api.twitter.com/1.1/search/tweets.json?" + \
urllib.urlencode({"q": query, "result_type": "recent", "count": 100})
client.fetch(url, headers={'Authorization': oauth(url)}, callback=self.on_response)
def on_response(self, response):
body = json.loads(response.body)
result_count = len(body['statuses'])
now = datetime.datetime.utcnow()
raw_oldest_tweet_at = body['statuses'][-1]['created_at']
oldest_tweet_at = datetime.datetime.strptime(raw_oldest_tweet_at,
"%a %b %d %H:%M:%S +0000 %Y")
seconds_diff = time.mktime(now.timetuple()) - \
time.mktime(oldest_tweet_at.timetuple())
tweets_per_second = float(result_count) / seconds_diff
self.write("""
<div style="text-align: center">
<div style="font-size: 72px">%s</div>
<div style="font-size: 144px">%.02f</div>
<div style="font-size: 24px">tweets per second</div>
</div>""" % (self.get_argument('q'), tweets_per_second))
# self.finish() is very important as Tornado will never close the
        # connection on its own. So by using self.finish we are telling
        # Tornado to close the connection.
        # otherwise bad things will happen.. mmkay.
self.finish()
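# Usage sketch (an assumption, not part of the original example): run
#   python tweet_rate_async.py --port=8000
# and open http://localhost:8000/?q=python to see the recent-tweet rate for a query;
# the local `oauth` helper imported above must supply valid Twitter credentials.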
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application(handlers=[(r"/", IndexHandler)])
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
 | avinassh/learning-tornado | tornado-book/asynchronous_web_services/async_http/tweet_rate_async.py | Python | mit | 2,552 |
from django import forms
from rango.models import Page, Category
from rango.models import UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text="Please enter the category name.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
class Meta:
# Provide an association between the ModelForm and a model
model = Category
class PageForm(forms.ModelForm):
title = forms.CharField(max_length=128, help_text="Please enter title of the page")
url = forms.URLField(max_length=200, help_text="Please enter URL of the page")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
class Meta:
# Provide an association between the ModelForm and a model
model = Page
fields = ('title', 'url', 'views')
def clean(self):
cleaned_data = self.cleaned_data
url = cleaned_data.get('url')
if url and not url.startswith('http://'):
url = 'http://' + url
cleaned_data['url'] = url
return cleaned_data
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('website', 'picture')
| Kentoseth/rangoapp | tango_with_django_project/rango/forms.py | Python | mit | 1,390 |
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.Plan
import github.Team
import github.Event
import github.Repository
import github.NamedUser
class Organization(github.GithubObject.CompletableGithubObject):
"""
This class represents Organizations. The reference can be found here http://developer.github.com/v3/orgs/
"""
def __repr__(self):
return self.get__repr__({"id": self._id.value, "name": self._name.value})
@property
def avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def billing_email(self):
"""
:type: string
"""
self._completeIfNotSet(self._billing_email)
return self._billing_email.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._members_url)
return self._members_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._public_members_url)
return self._public_members_url.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_public_members(self, public_member):
"""
:calls: `PUT /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/public_members/" + public_member._identity
)
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
url_parameters = {
"org": self.login,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks",
parameters=url_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, team_id=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, gitignore_template=github.GithubObject.NotSet):
"""
:calls: `POST /orgs/:org/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param team_id: :class:`github.Team.Team`
:param auto_init: bool
:param gitignore_template: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert team_id is github.GithubObject.NotSet or isinstance(team_id, github.Team.Team), team_id
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if team_id is not github.GithubObject.NotSet:
post_parameters["team_id"] = team_id._identity
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_team(self, name, repo_names=github.GithubObject.NotSet, permission=github.GithubObject.NotSet):
"""
:calls: `POST /orgs/:org/teams <http://developer.github.com/v3/orgs/teams>`_
:param name: string
:param repo_names: list of :class:`github.Repository.Repository`
:param permission: string
:rtype: :class:`github.Team.Team`
"""
assert isinstance(name, (str, unicode)), name
assert repo_names is github.GithubObject.NotSet or all(isinstance(element, github.Repository.Repository) for element in repo_names), repo_names
assert permission is github.GithubObject.NotSet or isinstance(permission, (str, unicode)), permission
post_parameters = {
"name": name,
}
if repo_names is not github.GithubObject.NotSet:
post_parameters["repo_names"] = [element._identity for element in repo_names]
if permission is not github.GithubObject.NotSet:
post_parameters["permission"] = permission
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/teams",
input=post_parameters
)
return github.Team.Team(self._requester, headers, data, completed=True)
def edit(self, billing_email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, email=github.GithubObject.NotSet, location=github.GithubObject.NotSet, name=github.GithubObject.NotSet):
"""
:calls: `PATCH /orgs/:org <http://developer.github.com/v3/orgs>`_
:param billing_email: string
:param blog: string
:param company: string
:param email: string
:param location: string
:param name: string
:rtype: None
"""
assert billing_email is github.GithubObject.NotSet or isinstance(billing_email, (str, unicode)), billing_email
assert blog is github.GithubObject.NotSet or isinstance(blog, (str, unicode)), blog
assert company is github.GithubObject.NotSet or isinstance(company, (str, unicode)), company
assert email is github.GithubObject.NotSet or isinstance(email, (str, unicode)), email
assert location is github.GithubObject.NotSet or isinstance(location, (str, unicode)), location
assert name is github.GithubObject.NotSet or isinstance(name, (str, unicode)), name
post_parameters = dict()
if billing_email is not github.GithubObject.NotSet:
post_parameters["billing_email"] = billing_email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_events(self):
"""
:calls: `GET /orgs/:org/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events",
None
)
def get_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /orgs/:org/issues <http://developer.github.com/v3/issues>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, (str, unicode)), filter
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
self.url + "/issues",
url_parameters
)
def get_members(self, filter_=github.GithubObject.NotSet,
role=github.GithubObject.NotSet):
"""
:calls: `GET /orgs/:org/members <http://developer.github.com/v3/orgs/members>`_
:param filter_: string
:param role: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
assert (filter_ is github.GithubObject.NotSet or
isinstance(filter_, (str, unicode))), filter_
assert (role is github.GithubObject.NotSet or
isinstance(role, (str, unicode))), role
url_parameters = {}
if filter_ is not github.GithubObject.NotSet:
url_parameters["filter"] = filter_
if role is not github.GithubObject.NotSet:
url_parameters["role"] = role
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/members",
url_parameters
)
def get_public_members(self):
"""
:calls: `GET /orgs/:org/public_members <http://developer.github.com/v3/orgs/members>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/public_members",
None
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/repos/" + self.login + "/" + name
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(self, type=github.GithubObject.NotSet):
"""
:calls: `GET /orgs/:org/repos <http://developer.github.com/v3/repos>`_
:param type: string ('all', 'public', 'private', 'forks', 'sources', 'member')
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert type is github.GithubObject.NotSet or isinstance(type, (str, unicode)), type
url_parameters = dict()
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/repos",
url_parameters
)
def get_team(self, id):
"""
:calls: `GET /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:param id: integer
:rtype: :class:`github.Team.Team`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/teams/" + str(id)
)
return github.Team.Team(self._requester, headers, data, completed=True)
def get_teams(self):
"""
:calls: `GET /orgs/:org/teams <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
self.url + "/teams",
None
)
def has_in_members(self, member):
"""
:calls: `GET /orgs/:org/members/:user <http://developer.github.com/v3/orgs/members>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(member, github.NamedUser.NamedUser), member
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/members/" + member._identity
)
if status == 302:
status, headers, data = self._requester.requestJson(
"GET",
headers['location']
)
return status == 204
def has_in_public_members(self, public_member):
"""
:calls: `GET /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/public_members/" + public_member._identity
)
return status == 204
def remove_from_members(self, member):
"""
:calls: `DELETE /orgs/:org/members/:user <http://developer.github.com/v3/orgs/members>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/members/" + member._identity
)
def remove_from_public_members(self, public_member):
"""
:calls: `DELETE /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/public_members/" + public_member._identity
)
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._billing_email = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._members_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_members_url = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "billing_email" in attributes: # pragma no branch
self._billing_email = self._makeStringAttribute(attributes["billing_email"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "members_url" in attributes: # pragma no branch
self._members_url = self._makeStringAttribute(attributes["members_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(attributes["owned_private_repos"])
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_members_url" in attributes: # pragma no branch
self._public_members_url = self._makeStringAttribute(attributes["public_members_url"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(attributes["total_private_repos"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
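# Usage sketch (an assumption; not part of this module): Organization objects are
# normally obtained through the top-level client, e.g.
#   from github import Github
#   org = Github("<token>").get_organization("my-org")
#   repo = org.create_repo("tools", private=True)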
| b0ttl3z/SickRage | lib/github/Organization.py | Python | gpl-3.0 | 29,379 |
from common.sagemaker_component import SageMakerJobStatus
from process.src.sagemaker_process_spec import SageMakerProcessSpec
from process.src.sagemaker_process_component import SageMakerProcessComponent
from tests.unit_tests.tests.process.test_process_spec import ProcessSpecTestCase
import unittest
import json
from unittest.mock import patch, MagicMock, ANY
class ProcessComponentTestCase(unittest.TestCase):
REQUIRED_ARGS = ProcessSpecTestCase.REQUIRED_ARGS
@classmethod
def setUp(cls):
cls.component = SageMakerProcessComponent()
# Instantiate without calling Do()
cls.component._processing_job_name = "test-job"
@patch("process.src.sagemaker_process_component.super", MagicMock())
def test_do_sets_name(self):
named_spec = SageMakerProcessSpec(
self.REQUIRED_ARGS + ["--job_name", "job-name"]
)
unnamed_spec = SageMakerProcessSpec(self.REQUIRED_ARGS)
self.component.Do(named_spec)
self.assertEqual("job-name", self.component._processing_job_name)
with patch(
"process.src.sagemaker_process_component.SageMakerComponent._generate_unique_timestamped_id",
MagicMock(return_value="unique"),
):
self.component.Do(unnamed_spec)
self.assertEqual("unique", self.component._processing_job_name)
@patch("common.sagemaker_component.SageMakerComponent._print_cloudwatch_logs")
def test_cw_logs(self, mocked_super_component):
self.component._print_logs_for_job()
self.component._print_cloudwatch_logs.assert_called_once_with(
"/aws/sagemaker/ProcessingJobs", self.component._processing_job_name
)
def test_create_process_job(self):
spec = SageMakerProcessSpec(self.REQUIRED_ARGS)
request = self.component._create_job_request(spec.inputs, spec.outputs)
self.assertEqual(
request,
{
"ProcessingJobName": "test-job",
"ProcessingInputs": [
{
"InputName": "dataset-input",
"S3Input": {
"S3Uri": "s3://my-bucket/dataset.csv",
"LocalPath": "/opt/ml/processing/input",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
},
}
],
"ProcessingOutputConfig": {
"Outputs": [
{
"OutputName": "training-outputs",
"S3Output": {
"S3Uri": "s3://my-bucket/outputs/train.csv",
"LocalPath": "/opt/ml/processing/output/train",
"S3UploadMode": "Continuous",
},
}
]
},
"RoleArn": "arn:aws:iam::123456789012:user/Development/product_1234/*",
"ProcessingResources": {
"ClusterConfig": {
"InstanceType": "ml.m4.xlarge",
"InstanceCount": 1,
"VolumeSizeInGB": 30,
}
},
"NetworkConfig": {
"EnableInterContainerTrafficEncryption": False,
"EnableNetworkIsolation": True,
},
"StoppingCondition": {"MaxRuntimeInSeconds": 86400},
"AppSpecification": {"ImageUri": "test-image"},
"Environment": {},
"Tags": [],
},
)
def test_get_job_status(self):
self.component._sm_client = MagicMock()
self.component._sm_client.describe_processing_job.return_value = {
"ProcessingJobStatus": "Starting"
}
self.assertEqual(
self.component._get_job_status(),
SageMakerJobStatus(is_completed=False, raw_status="Starting"),
)
self.component._sm_client.describe_processing_job.return_value = {
"ProcessingJobStatus": "Downloading"
}
self.assertEqual(
self.component._get_job_status(),
SageMakerJobStatus(is_completed=False, raw_status="Downloading"),
)
self.component._sm_client.describe_processing_job.return_value = {
"ProcessingJobStatus": "Completed"
}
self.assertEqual(
self.component._get_job_status(),
SageMakerJobStatus(is_completed=True, raw_status="Completed"),
)
self.component._sm_client.describe_processing_job.return_value = {
"ProcessingJobStatus": "Failed",
"FailureReason": "lolidk",
}
self.assertEqual(
self.component._get_job_status(),
SageMakerJobStatus(
is_completed=True,
raw_status="Failed",
has_error=True,
error_message="lolidk",
),
)
def test_after_job_completed(self):
spec = SageMakerProcessSpec(self.REQUIRED_ARGS)
mock_out = {"out1": "val1", "out2": "val2"}
self.component._get_job_outputs = MagicMock(return_value=mock_out)
self.component._after_job_complete({}, {}, spec.inputs, spec.outputs)
self.assertEqual(spec.outputs.job_name, "test-job")
self.assertEqual(
spec.outputs.output_artifacts, {"out1": "val1", "out2": "val2"}
)
def test_get_job_outputs(self):
self.component._sm_client = mock_client = MagicMock()
mock_client.describe_processing_job.return_value = {
"ProcessingOutputConfig": {
"Outputs": [
{"OutputName": "train", "S3Output": {"S3Uri": "s3://train"}},
{"OutputName": "valid", "S3Output": {"S3Uri": "s3://valid"}},
]
}
}
self.assertEqual(
self.component._get_job_outputs(),
{"train": "s3://train", "valid": "s3://valid"},
)
def test_no_defined_image(self):
# Pass the image to pass the parser
no_image_args = self.REQUIRED_ARGS.copy()
image_index = no_image_args.index("--image")
        # Cut out --image and its associated value
no_image_args = no_image_args[:image_index] + no_image_args[image_index + 2 :]
with self.assertRaises(SystemExit):
SageMakerProcessSpec(no_image_args)
def test_container_entrypoint(self):
entrypoint, arguments = ["/bin/bash"], ["arg1", "arg2"]
container_args = SageMakerProcessSpec(
self.REQUIRED_ARGS
+ [
"--container_entrypoint",
json.dumps(entrypoint),
"--container_arguments",
json.dumps(arguments),
]
)
response = self.component._create_job_request(
container_args.inputs, container_args.outputs
)
self.assertEqual(
response["AppSpecification"]["ContainerEntrypoint"], entrypoint
)
self.assertEqual(response["AppSpecification"]["ContainerArguments"], arguments)
def test_environment_variables(self):
env_vars = {"key1": "val1", "key2": "val2"}
environment_args = SageMakerProcessSpec(
self.REQUIRED_ARGS + ["--environment", json.dumps(env_vars)]
)
response = self.component._create_job_request(
environment_args.inputs, environment_args.outputs
)
self.assertEqual(response["Environment"], env_vars)
def test_vpc_configuration(self):
required_vpc_args = SageMakerProcessSpec(
self.REQUIRED_ARGS
+ [
"--vpc_security_group_ids",
"sg1,sg2",
"--vpc_subnets",
"subnet1,subnet2",
]
)
response = self.component._create_job_request(
required_vpc_args.inputs, required_vpc_args.outputs
)
self.assertIn("VpcConfig", response["NetworkConfig"])
self.assertIn("sg1", response["NetworkConfig"]["VpcConfig"]["SecurityGroupIds"])
self.assertIn("sg2", response["NetworkConfig"]["VpcConfig"]["SecurityGroupIds"])
self.assertIn("subnet1", response["NetworkConfig"]["VpcConfig"]["Subnets"])
self.assertIn("subnet2", response["NetworkConfig"]["VpcConfig"]["Subnets"])
| kubeflow/pipelines | components/aws/sagemaker/tests/unit_tests/tests/process/test_process_component.py | Python | apache-2.0 | 8,560 |
import os
from locust import HttpUser, TaskSet, task, between
from locust.clients import HttpSession
class MultipleHostsUser(HttpUser):
abstract = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.api_client = HttpSession(base_url=os.environ["API_HOST"])
class UserTasks(TaskSet):
# but it might be convenient to use the @task decorator
@task
def index(self):
self.user.client.get("/")
@task
def index_other_host(self):
self.user.api_client.get("/stats/requests")
class WebsiteUser(MultipleHostsUser):
"""
User class that does requests to the locust web server running on localhost
"""
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
tasks = [UserTasks]
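# Illustrative run (an assumption): the extra host comes from the environment, e.g.
#   API_HOST=http://127.0.0.1:8089 locust -f multiple_hosts.py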
| mbeacom/locust | examples/multiple_hosts.py | Python | mit | 788 |
from django.contrib.auth.models import User
from tastypie import http
from tastypie import fields
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.resources import Resource
from tastypie.bundle import Bundle
from haystack.query import SearchQuerySet
from agora_site.misc.utils import GenericForeignKeyField
from agora_site.misc.generic_resource import (GenericResource,
GenericResourceMixin, GenericMeta)
from agora_site.agora_core.models.agora import Agora
from agora_site.agora_core.models.election import Election
from agora_site.agora_core.models.castvote import CastVote
from agora_site.agora_core.models import Profile
from agora_site.agora_core.resources.agora import TinyAgoraResource
from agora_site.agora_core.resources.election import TinyElectionResource
from agora_site.agora_core.resources.user import TinyProfileResource
# NOTE that GenericResourceMixin must take precedence in inheritance so that
# we can make sure its GenericResourceMixin.api_field_from_django_field is
# used
class SearchResource(GenericResourceMixin, Resource):
'''
Resource used for general search, internally uses Haystack.
It allows searching using the GET param "q". I.e. /search/?q=foobar
Loosely based on http://django-tastypie.readthedocs.org/en/latest/non_orm_data_sources.html
'''
obj = GenericForeignKeyField({
Agora: TinyAgoraResource,
Election: TinyElectionResource,
Profile: TinyProfileResource,
}, 'object', full=True)
class Meta(GenericMeta):
list_allowed_methods = ['get']
detail_allowed_methods = ['get']
def detail_uri_kwargs(self, bundle_or_obj):
'''
processes kwargs for detail uris. TastyPie's Resource class requires
an implementation of this function to work.
'''
kwargs = {}
# We save object id in kwargs so that def obj_get can use it, as in
# tastypie documentation example
if isinstance(bundle_or_obj, Bundle):
kwargs['id'] = bundle_or_obj.obj.object.id
else:
kwargs['id'] = bundle_or_obj.object.id
return kwargs
def build_bundle(self, obj=None, data=None, request=None, objects_saved=None):
"""
        Reimplemented so that it always uses obj directly, even if it is None;
        otherwise tastypie would try to instantiate object_class, which is not
        defined.
It also saves the request for later usage in obj_get_list.
"""
self.request = request
return Bundle(
obj=obj,
data=data,
request=request,
objects_saved=objects_saved
)
def get_search_query_set(self, request):
model = request.GET.get("model", None)
models = {'agora': Agora,
'castvote': CastVote,
'election': Election
}
if model and model in models:
return SearchQuerySet().models(models[model])
return SearchQuerySet()
def get_object_list(self, request):
'''
By default search lists all objects in haystack
'''
return self.get_search_query_set(request)
def obj_get_list(self, **kwargs):
'''
Returns a filtered object lists. Allows filtering using a query string
using the GET param "q"
'''
query = self.request.GET.get("q", None)
if query:
q = self.get_search_query_set(self.request)
q = q.auto_query(query)
return q
return self.get_object_list(self.request)
def obj_get(self, request=None, **kwargs):
'''
Retrieves a detailed search item
'''
return SearchQuerySet().filter(id=kwargs['id'])[0]
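# Example queries (illustrative; the exact URL prefix depends on how the API is registered):
#   GET .../search/?q=participatory+budget              -> search across all indexed models
#   GET .../search/?q=participatory+budget&model=agora  -> restrict results to Agora objects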
| joanma100/agora-ciudadana | agora_site/agora_core/resources/search.py | Python | agpl-3.0 | 3,770 |
#!/usr/bin/env python
import mozhttpd
import mozfile
import os
import tempfile
import unittest
class TestBasic(unittest.TestCase):
""" Test basic Mozhttpd capabilites """
def test_basic(self):
""" Test mozhttpd can serve files """
tempdir = tempfile.mkdtemp()
# sizes is a dict of the form: name -> [size, binary_string, filepath]
sizes = {'small': [128], 'large': [16384]}
for k in sizes.keys():
# Generate random binary string
sizes[k].append(os.urandom(sizes[k][0]))
# Add path of file with binary string to list
fpath = os.path.join(tempdir, k)
sizes[k].append(fpath)
# Write binary string to file
with open(fpath, 'wb') as f:
f.write(sizes[k][1])
server = mozhttpd.MozHttpd(docroot=tempdir)
server.start()
server_url = server.get_url()
# Retrieve file and check contents matchup
for k in sizes.keys():
retrieved_content = mozfile.load(server_url + k).read()
self.assertEqual(retrieved_content, sizes[k][1])
# Cleanup tempdir and related files
mozfile.rmtree(tempdir)
if __name__ == '__main__':
unittest.main()
| vladikoff/fxa-mochitest | tests/mozbase/mozhttpd/tests/basic.py | Python | mpl-2.0 | 1,260 |
# -*- coding: utf-8 -*-
"""
gspread.client
~~~~~~~~~~~~~~
This module contains Client class responsible for communicating with
Google Data API.
"""
import re
import warnings
from xml.etree import ElementTree
from . import __version__
from . import urlencode
from .ns import _ns
from .httpsession import HTTPSession, HTTPError
from .models import Spreadsheet
from .urls import construct_url
from .utils import finditem
from .exceptions import (AuthenticationError, SpreadsheetNotFound,
NoValidUrlKeyFound, UpdateCellError,
RequestError)
AUTH_SERVER = 'https://www.google.com'
SPREADSHEETS_SERVER = 'spreadsheets.google.com'
_url_key_re_v1 = re.compile(r'key=([^&#]+)')
_url_key_re_v2 = re.compile(r'spreadsheets/d/([^&#]+)/edit')
class Client(object):
"""An instance of this class communicates with Google Data API.
:param auth: A tuple containing an *email* and a *password* used for ClientLogin
authentication or an OAuth2 credential object. Credential objects are those created by the
oauth2client library. https://github.com/google/oauth2client
:param http_session: (optional) A session object capable of making HTTP requests while persisting headers.
Defaults to :class:`~gspread.httpsession.HTTPSession`.
>>> c = gspread.Client(auth=('[email protected]', 'qwertypassword'))
or
>>> c = gspread.Client(auth=OAuthCredentialObject)
"""
def __init__(self, auth, http_session=None):
self.auth = auth
self.session = http_session or HTTPSession()
def _get_auth_token(self, content):
for line in content.splitlines():
if line.startswith('Auth='):
return line[5:]
return None
def _deprecation_warning(self):
warnings.warn("""
ClientLogin is deprecated:
https://developers.google.com/identity/protocols/AuthForInstalledApps?csw=1
Authorization with email and password will stop working on April 20, 2015.
Please use oAuth2 authorization instead:
http://gspread.readthedocs.org/en/latest/oauth2.html
""", Warning)
def _ensure_xml_header(self, data):
if data.startswith('<?xml'):
return data
else:
return "<?xml version='1.0' encoding='utf8'?>%s" % data
def login(self):
"""Authorize client using ClientLogin protocol.
The credentials provided in `auth` parameter to class' constructor will be used.
This method is using API described at:
http://code.google.com/apis/accounts/docs/AuthForInstalledApps.html
:raises AuthenticationError: if login attempt fails.
"""
source = 'burnash-gspread-%s' % __version__
service = 'wise'
if hasattr(self.auth, 'access_token'):
if not self.auth.access_token or \
(hasattr(self.auth, 'access_token_expired') and self.auth.access_token_expired):
import httplib2
http = httplib2.Http()
self.auth.refresh(http)
self.session.add_header('Authorization', "Bearer " + self.auth.access_token)
else:
self._deprecation_warning()
data = {'Email': self.auth[0],
'Passwd': self.auth[1],
'accountType': 'HOSTED_OR_GOOGLE',
'service': service,
'source': source}
url = AUTH_SERVER + '/accounts/ClientLogin'
try:
r = self.session.post(url, data)
content = r.read().decode()
token = self._get_auth_token(content)
auth_header = "GoogleLogin auth=%s" % token
self.session.add_header('Authorization', auth_header)
except HTTPError as ex:
if ex.message.strip() == '403: Error=BadAuthentication':
raise AuthenticationError("Incorrect username or password")
else:
raise AuthenticationError(
"Unable to authenticate. %s" % ex.message)
def open(self, title):
"""Opens a spreadsheet, returning a :class:`~gspread.Spreadsheet` instance.
:param title: A title of a spreadsheet.
If there's more than one spreadsheet with same title the first one
will be opened.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `title` is found.
>>> c = gspread.Client(auth=('[email protected]', 'qwertypassword'))
>>> c.login()
>>> c.open('My fancy spreadsheet')
"""
feed = self.get_spreadsheets_feed()
for elem in feed.findall(_ns('entry')):
elem_title = elem.find(_ns('title')).text
if elem_title.strip() == title:
return Spreadsheet(self, elem)
else:
raise SpreadsheetNotFound
def open_by_key(self, key):
"""Opens a spreadsheet specified by `key`, returning a :class:`~gspread.Spreadsheet` instance.
:param key: A key of a spreadsheet as it appears in a URL in a browser.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `key` is found.
>>> c = gspread.Client(auth=('[email protected]', 'qwertypassword'))
>>> c.login()
>>> c.open_by_key('0BmgG6nO_6dprdS1MN3d3MkdPa142WFRrdnRRUWl1UFE')
"""
feed = self.get_spreadsheets_feed()
for elem in feed.findall(_ns('entry')):
alter_link = finditem(lambda x: x.get('rel') == 'alternate',
elem.findall(_ns('link')))
m = _url_key_re_v1.search(alter_link.get('href'))
if m and m.group(1) == key:
return Spreadsheet(self, elem)
m = _url_key_re_v2.search(alter_link.get('href'))
if m and m.group(1) == key:
return Spreadsheet(self, elem)
else:
raise SpreadsheetNotFound
def open_by_url(self, url):
"""Opens a spreadsheet specified by `url`,
returning a :class:`~gspread.Spreadsheet` instance.
:param url: URL of a spreadsheet as it appears in a browser.
:raises gspread.SpreadsheetNotFound: if no spreadsheet with
specified `url` is found.
>>> c = gspread.Client(auth=('[email protected]', 'qwertypassword'))
>>> c.login()
>>> c.open_by_url('https://docs.google.com/spreadsheet/ccc?key=0Bm...FE&hl')
"""
m1 = _url_key_re_v1.search(url)
if m1:
return self.open_by_key(m1.group(1))
else:
m2 = _url_key_re_v2.search(url)
if m2:
return self.open_by_key(m2.group(1))
else:
raise NoValidUrlKeyFound
def openall(self, title=None):
"""Opens all available spreadsheets,
returning a list of a :class:`~gspread.Spreadsheet` instances.
:param title: (optional) If specified can be used to filter
spreadsheets by title.
"""
feed = self.get_spreadsheets_feed()
result = []
for elem in feed.findall(_ns('entry')):
if title is not None:
elem_title = elem.find(_ns('title')).text
if elem_title.strip() != title:
continue
result.append(Spreadsheet(self, elem))
return result
def get_spreadsheets_feed(self, visibility='private', projection='full'):
url = construct_url('spreadsheets',
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def get_worksheets_feed(self, spreadsheet,
visibility='private', projection='full'):
url = construct_url('worksheets', spreadsheet,
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def get_cells_feed(self, worksheet,
visibility='private', projection='full', params=None):
url = construct_url('cells', worksheet,
visibility=visibility, projection=projection)
if params:
params = urlencode(params)
url = '%s?%s' % (url, params)
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def get_feed(self, url):
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def del_worksheet(self, worksheet):
url = construct_url(
'worksheet', worksheet, 'private', 'full', worksheet_version=worksheet.version)
r = self.session.delete(url)
# Even though there is nothing interesting in the response body
# we have to read it or the next request from this session will get a
# httplib.ResponseNotReady error.
r.read()
def get_cells_cell_id_feed(self, worksheet, cell_id,
visibility='private', projection='full'):
url = construct_url('cells_cell_id', worksheet, cell_id=cell_id,
visibility=visibility, projection=projection)
r = self.session.get(url)
return ElementTree.fromstring(r.read())
def list_rows(self, worksheet):
url = construct_url('list', worksheet)
r = self.session.get(url)
feed = ElementTree.fromstring(r.read())
return feed.findall(_ns('entry'))
def delete_row(self, row_edit_url):
self.session.delete(row_edit_url)
def put_feed(self, url, data):
headers = {'Content-Type': 'application/atom+xml',
'If-Match': '*'}
data = self._ensure_xml_header(data)
try:
r = self.session.put(url, data, headers=headers)
except HTTPError as ex:
if getattr(ex, 'code', None) == 403:
raise UpdateCellError(ex.message)
else:
raise ex
return ElementTree.fromstring(r.read())
def post_feed(self, url, data):
headers = {'Content-Type': 'application/atom+xml'}
data = self._ensure_xml_header(data)
try:
r = self.session.post(url, data, headers=headers)
except HTTPError as ex:
raise RequestError(ex.message)
return ElementTree.fromstring(r.read())
def post_cells(self, worksheet, data):
headers = {'Content-Type': 'application/atom+xml',
'If-Match': '*'}
data = self._ensure_xml_header(data)
url = construct_url('cells_batch', worksheet)
r = self.session.post(url, data, headers=headers)
return ElementTree.fromstring(r.read())
def login(email, password):
"""Login to Google API using `email` and `password`.
This is a shortcut function which instantiates :class:`Client`
and performs login right away.
:returns: :class:`Client` instance.
"""
client = Client(auth=(email, password))
client.login()
return client
def authorize(credentials):
"""Login to Google API using OAuth2 credentials.
This is a shortcut function which instantiates :class:`Client`
and performs login right away.
:returns: :class:`Client` instance.
"""
client = Client(auth=credentials)
client.login()
return client
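# OAuth2 usage sketch (an assumption; the credential object comes from oauth2client):
#   credentials = ...  # e.g. a SignedJwtAssertionCredentials instance
#   gc = authorize(credentials)
#   spreadsheet = gc.open("My fancy spreadsheet")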
| amleczko/gspread | gspread/client.py | Python | mit | 11,658 |
from __future__ import absolute_import
from os.path import join, abspath, dirname
from .utils import gen_cookie_secret
PROJECT_ROOT = abspath(dirname(__file__))
APP_SETTINGS = dict(
template_path=join(PROJECT_ROOT, "templates"),
static_path=join(PROJECT_ROOT, "static"),
cookie_secret=gen_cookie_secret(),
login_url='/login',
)
URL_PREFIX = ''
PAGE_UPDATE_INTERVAL = 2000
CELERY_EVENTS_ENABLE_INTERVAL = 5000
CELERY_INSPECT_TIMEOUT = 1000
| kony-solutions/flower | flower/settings.py | Python | bsd-3-clause | 461 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file ui_wallet_dlg_options1.ui
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_WdgOptions1(object):
def setupUi(self, WdgOptions1):
WdgOptions1.setObjectName("WdgOptions1")
WdgOptions1.resize(243, 76)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(WdgOptions1.sizePolicy().hasHeightForWidth())
WdgOptions1.setSizePolicy(sizePolicy)
WdgOptions1.setStyleSheet("")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(WdgOptions1)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.frame = QtWidgets.QFrame(WdgOptions1)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout.setContentsMargins(6, 6, 6, 6)
self.verticalLayout.setSpacing(6)
self.verticalLayout.setObjectName("verticalLayout")
self.chbShowAddresses = QtWidgets.QCheckBox(self.frame)
self.chbShowAddresses.setObjectName("chbShowAddresses")
self.verticalLayout.addWidget(self.chbShowAddresses)
self.chbShowZeroBalanceAddresses = QtWidgets.QCheckBox(self.frame)
self.chbShowZeroBalanceAddresses.setObjectName("chbShowZeroBalanceAddresses")
self.verticalLayout.addWidget(self.chbShowZeroBalanceAddresses)
self.chbShowNotUsedAddresses = QtWidgets.QCheckBox(self.frame)
self.chbShowNotUsedAddresses.setObjectName("chbShowNotUsedAddresses")
self.verticalLayout.addWidget(self.chbShowNotUsedAddresses)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setSpacing(0)
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.btnApply = QtWidgets.QToolButton(self.frame)
self.btnApply.setObjectName("btnApply")
self.horizontalLayout.addWidget(self.btnApply)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2.addWidget(self.frame)
self.retranslateUi(WdgOptions1)
QtCore.QMetaObject.connectSlotsByName(WdgOptions1)
def retranslateUi(self, WdgOptions1):
_translate = QtCore.QCoreApplication.translate
WdgOptions1.setWindowTitle(_translate("WdgOptions1", "Form"))
self.chbShowAddresses.setText(_translate("WdgOptions1", "Show individual addresses"))
self.chbShowZeroBalanceAddresses.setText(_translate("WdgOptions1", "Show addresses with zero balance"))
self.chbShowNotUsedAddresses.setText(_translate("WdgOptions1", "Show addresses not yet used"))
self.btnApply.setText(_translate("WdgOptions1", "Apply"))
| Bertrand256/dash-masternode-tool | src/ui/ui_wallet_dlg_options1.py | Python | mit | 3,432 |
# Copyright (C) 2010 Linaro Limited
#
# Author: Zygmunt Krynicki <[email protected]>
#
# This file is part of Launch Control.
#
# Launch Control is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# Launch Control is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Launch Control. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for the Attachment model
"""
from django.contrib.contenttypes import generic
from django.core.files.base import ContentFile
from django.db import models
from django.test import TestCase
from dashboard_app.models import Attachment
class ModelWithAttachments(models.Model):
"""
Test model that uses attachments
"""
attachments = generic.GenericRelation(Attachment)
class Meta:
# This requires a bit of explanation. Traditionally we could add new
# models inside test modules and they would be picked up by django and
# synchronized (created in the test database) as a part of the test
# provisioning process.
# Since we started using south, synchronization is no longer occurring
# for the 'dashboard_app' application. This caused some test failures
# such as any tests that depended on the existence of this model.
# As a workaround we artificially "stick" this model into the only
# application that we can count on to exist _and_ not use south as well
# -- that is south itself.
# This way the test model gets synchronized when south is synchronized
# and all the test code below works as expected.
app_label = "south"
class AttachmentTestCase(TestCase):
_CONTENT = "text"
_FILENAME = "filename"
def setUp(self):
self.obj = ModelWithAttachments.objects.create()
def test_attachment_can_be_added_to_models(self):
attachment = self.obj.attachments.create(
content_filename=self._FILENAME, content=None)
self.assertEqual(attachment.content_object, self.obj)
def test_attachment_can_be_accessed_via_model(self):
self.obj.attachments.create(
content_filename=self._FILENAME, content=None)
self.assertEqual(self.obj.attachments.count(), 1)
retrieved_attachment = self.obj.attachments.all()[0]
self.assertEqual(retrieved_attachment.content_object, self.obj)
def test_attachment_stores_data(self):
attachment = self.obj.attachments.create(
content_filename=self._FILENAME, content=None)
attachment.content.save(
self._FILENAME,
ContentFile(self._CONTENT))
self.assertEqual(attachment.content_filename, self._FILENAME)
attachment.content.open()
try:
self.assertEqual(attachment.content.read(), self._CONTENT)
finally:
attachment.content.close()
attachment.content.delete(save=False)
def test_unicode(self):
obj = Attachment(content_filename="test.json")
self.assertEqual(unicode(obj), "test.json")
| OSSystems/lava-server | dashboard_app/tests/models/attachment.py | Python | agpl-3.0 | 3,410 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import _env
import json
import re
import time
from lib._db import get_collection
from config.config import CONFIG
from judge_upload import exist_or_insert
DB = CONFIG.MONGO.DATABASE
PAT = re.compile('[-#\w]+')
def find_first_tag(s):
m = PAT.search(s)
return m.group() if m else s
def remove_china_char(s):
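    # drop CJK characters (the \u4e00-\u9fa5 range), e.g. u'正则 regex' -> u' regex'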
if not s:
return s
return re.sub(ur"[\u4e00-\u9fa5]+", '', s)
def cur_timestamp():
return int(time.time() * 1000)
USERS = [
{
"id": 1,
"name": "jishushare",
"slug": "jishushare",
"email": "[email protected]",
"image": None,
"cover": None,
"bio": None,
"website": None,
"location": None,
"accessibility": None,
"status": "active",
"language": "zh_CN",
"meta_title": None,
"meta_description": None,
"last_login": None,
"created_at": 1283780649000,
"created_by": 1,
"updated_at": 1286958624000,
"updated_by": 1
}
]
TAGS = [
{
"id": 1,
"name": u'文章',
"slug": u'article',
"description": ""
}
]
def replace_post(post_data):
d = {
"id": 5,
"title": "my blog post title",
"slug": "my-blog-post-title",
"markdown": "the *markdown* formatted post body",
#"html": "the <i>html</i> formatted post body",
"image": None,
"featured": 0,
"page": 0,
"status": "published",
"language": "zh_CN",
"meta_title": None,
"meta_description": None,
"author_id": 1,
"created_at": cur_timestamp(),
"created_by": 1,
"updated_at": cur_timestamp(),
"updated_by": 1,
"published_at": cur_timestamp(),
"published_by": 1
}
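    # the numeric post id is taken from the source URL, e.g. a source_url
    # ending in '/12345.htm' yields id 12345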
d['id'] = int(post_data['source_url'].rsplit('/', 1)[1].split('.')[0])
d['title'] = post_data['title'].strip()
d['slug'] = post_data['title'].strip().replace(' ', '-').lower()
d['markdown'] = post_data['content'].strip()
return d
def migrate(coll_name='article', limit=10):
res = {
"meta": {
"exported_on": cur_timestamp(),
"version": "003"
}
}
coll = get_collection(DB, coll_name)
posts = []
posts_tags = []
index = 0
for doc in coll.find().batch_size(1000):
title = doc.get('title')
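        # exist_or_insert appears to record seen titles and return True for
        # duplicates, so only not-yet-exported posts are processed here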
if not exist_or_insert(title):
doc_id = doc.get('_id')
post_id = int(doc['source_url'].rsplit('/', 1)[1].split('.')[0])
index += 1
if index > limit:
break
posts.append(replace_post(doc))
posts_tags.append(
{"tag_id": 1, "post_id": post_id}
)
data = {
"posts": posts,
"tags": TAGS,
"posts_tags": posts_tags,
"users": USERS
}
res["data"] = data
return res
def tag_migrate(limit=10):
res = {
"meta": {
"exported_on": cur_timestamp(),
"version": "003"
}
}
coll = get_collection(DB, 'article')
posts = []
tags_id_map = {}
posts_tags = []
tag_id = 1000
index = 0
for doc in coll.find().batch_size(1000):
#print(doc.get('title'))
index += 1
if index > limit:
break
posts.append(replace_post(doc))
post_id = int(doc['source_url'].rsplit('/', 1)[1].split('.')[0])
tag_list = doc.get('tag_list')
tag = tag_list[0] if tag_list else ''
tag = remove_china_char(tag)
if tag:
save_tag = tag.replace(' ', '-').lower()
save_tag = find_first_tag(save_tag)
if len(save_tag) > 10:
posts_tags.append(
{"tag_id": 1, "post_id": post_id}
)
continue
if save_tag not in tags_id_map:
tag_id += 1
TAGS.append({
"id": tag_id,
"name": save_tag,
"slug": save_tag,
"description": ""
})
tags_id_map[save_tag] = tag_id
posts_tags.append(
{"tag_id": tags_id_map[save_tag], "post_id": post_id}
)
        else:
            # posts without a usable tag fall back to the default "article" tag (id 1)
            posts_tags.append(
                {"tag_id": 1, "post_id": post_id}
            )
data = {
"posts": posts,
"tags": TAGS,
"posts_tags": posts_tags,
"users": USERS
}
res["data"] = data
return res
def test():
print(find_first_tag('正则表达式指南'))
def main():
import sys
try:
cnt = int(sys.argv[1])
except:
cnt = 3000
res = migrate('article', cnt)
print(json.dumps(res, indent=4))
if __name__ == '__main__':
main()
| PegasusWang/articles | crawler/jb51/jb51_to_ghost.py | Python | mit | 5,080 |
# Created by Adam Streck, 2013-2015, [email protected]
#
# This file is part of the Toolkit for Reverse Engineering of Molecular Pathways
# via Parameter Identification (TREMPPI)
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import sys
import os.path
from tremppi.header import data_folder, configure_filename, system_init, system, model_file
from tremppi.clean import clean
from tremppi.configure import configure
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Erase the dynamic labels and the values for the properties.')
parser.add_argument('--path', help='specify the location to unfreeze')
parser.add_argument('--server', help='informs the script that it is called from a server.', action='store_true')
args = parser.parse_args()
system_init(sys.argv[0], args)
DATA_PATH = os.path.join(system.DEST_PATH, data_folder)
if not os.path.exists(os.path.join(system.DEST_PATH, configure_filename)):
raise Exception('The target folder ' + system.DEST_PATH + ' does not seem to be a TREMPPI project folder. The ' + configure_filename + ' is missing.')
else:
clean(DATA_PATH, 'properties')
configure(DATA_PATH, 'properties')
configure(DATA_PATH, 'tools') | xstreck1/TREMPPI | python/unfreeze.py | Python | gpl-3.0 | 1,852 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Author: echel0n <[email protected]>
# URL: http://www.github.com/sickragetv/sickrage/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
import sickbeard
from sickbeard import common
from pynma import pynma
class NMA_Notifier:
def test_notify(self, nma_api, nma_priority):
return self._sendNMA(nma_api, nma_priority, event="Test", message="Testing NMA settings from SiCKRAGE",
force=True)
def notify_snatch(self, ep_name):
if sickbeard.NMA_NOTIFY_ONSNATCH:
self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_SNATCH],
message=ep_name)
def notify_download(self, ep_name):
if sickbeard.NMA_NOTIFY_ONDOWNLOAD:
self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_DOWNLOAD],
message=ep_name)
def notify_subtitle_download(self, ep_name, lang):
if sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD:
self._sendNMA(nma_api=None, nma_priority=None, event=common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD],
message=ep_name + ": " + lang)
def notify_git_update(self, new_version="??"):
if sickbeard.USE_NMA:
update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
self._sendNMA(nma_api=None, nma_priority=None, event=title, message=update_text + new_version)
def _sendNMA(self, nma_api=None, nma_priority=None, event=None, message=None, force=False):
title = 'SiCKRAGE'
if not sickbeard.USE_NMA and not force:
return False
        if nma_api is None:
            nma_api = sickbeard.NMA_API
        if nma_priority is None:
            nma_priority = sickbeard.NMA_PRIORITY
batch = False
p = pynma.PyNMA()
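        # nma_api may hold several comma-separated API keys; with more than one
        # key the notification is pushed in batch mode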
keys = nma_api.split(',')
p.addkey(keys)
if len(keys) > 1: batch = True
logging.debug("NMA: Sending notice with details: event=\"%s\", message=\"%s\", priority=%s, batch=%s" % (
event, message, nma_priority, batch))
response = p.push(application=title, event=event, description=message, priority=nma_priority, batch_mode=batch)
if not response[nma_api][b'code'] == '200':
logging.error('Could not send notification to NotifyMyAndroid')
return False
else:
logging.info("NMA: Notification sent to NotifyMyAndroid")
return True
notifier = NMA_Notifier
| mcus/SickRage | sickbeard/notifiers/nma.py | Python | gpl-3.0 | 3,311 |
####################################################################################################
#
# Musica - A Music Theory Package for Python
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
__all__ = [
]
####################################################################################################
import math
from ..Geometry.Path import Polyline
from ..Geometry.Transformation import AffineTransformation2D
from ..Geometry.Vector import Vector2D
from ..Tex.Tikz import TikzFigure
from ..Theory.Pitch import Pitch
####################################################################################################
class KeyboardSizing:
# In Germany DIN 8996 (Klaviatur für Pianos und Flügel ; Maße)
# Yamaha is ranging from 159 to 164 mm
# white width / largeur des marches | 23.6 mm
# black width / largeur des feintes | 11.5 mm (48.7 %)
# octave width / empan de l'octave (7 keys) | 165.2 mm (23.6 * 7)
# keyboard width (88 keys) | 1 227.0 mm (+4/-0 mm)
# Key length depend of the keyboard model
# typically for a grand piano, smaller keyboards have shorter keys
# white length | 145.0 mm
# black length | 95.0 mm (-50 mm, 65.5 %)
__white_width__ = 23.6 # mm
__black_width__ = 11.5
__white_length__ = 145
__black_length__ = 85
##############################################
@classmethod
def scale(cls, **kwargs):
if 'octave_width' in kwargs:
white_width = kwargs['octave_width'] / 7
elif 'width_scale' in kwargs:
white_width = cls.__white_length__ * kwargs['width_scale']
else:
white_width = kwargs.get('white_width', cls.__white_width__)
if 'length_scale' in kwargs:
white_length = cls.__white_length__ * kwargs['length_scale']
else:
white_length = kwargs.get('white_length', cls.__white_length__)
# Fixme: black ...
return cls(white_width=white_width,
white_length=white_length)
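    # e.g. KeyboardSizing.scale(octave_width=160) would give ~22.9 mm (160/7)
    # white keys while keeping the default key lengths and black-key sizing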
##############################################
def __init__(self, **kwargs):
self._white_width = kwargs.get('white_width', self.__white_width__)
self._black_width = kwargs.get('black_width', self.__black_width__)
self._white_length = kwargs.get('white_length', self.__white_length__)
self._black_length = kwargs.get('black_length', self.__black_length__)
##############################################
@property
def white_width(self):
return self._white_width
@property
def white_length(self):
return self._white_length
@property
def black_width(self):
return self._black_width
@property
def black_length(self):
return self._black_length
##############################################
@property
def black_width_ratio(self):
return self._black_width / self._white_width
@property
def black_length_ratio(self):
return self._black_length / self._white_length
##############################################
@property
def octave_width(self):
return self._white_width * 7
##############################################
def keyboard_width(self, number_of_key=52):
"""A 88 key model has 52 white key and 36 black keys, spanning over 7 octaves from A0 to C8.
"""
return self._white_width * number_of_key
####################################################################################################
class KeyGeometry:
##############################################
def __init__(self, sizing):
self._sizing = sizing
self._path = None
self._lower_point = None
self._upper_point = None
##############################################
@property
def sizing(self):
return self._sizing
@property
def lower_point(self):
return self._lower_point
@property
def upper_point(self):
return self._upper_point
@property
def path(self):
return self._path
####################################################################################################
class BlackKeyGeometry(KeyGeometry):
##############################################
def __init__(self, sizing):
super().__init__(sizing)
x_sup = self._sizing.black_width / 2
x_inf = -x_sup
y_sup = self._sizing.white_length
y_inf = y_sup - self._sizing.black_length
x_center = (x_sup + x_inf) / 2
self._lower_point = Vector2D(x_center, 0)
self._upper_point = Vector2D(x_center, y_sup)
self._path = Polyline(
(x_inf, y_inf),
(x_inf, y_sup),
(x_sup, y_sup),
(x_sup, y_inf),
)
####################################################################################################
class WhiteKeyGeometry(KeyGeometry):
##############################################
def __init__(self, sizing):
super().__init__(sizing)
self._x_inf = 0
self._x_sup = self._sizing.white_width
self._y_inf = 0
self._y_sup = self._sizing.white_length
self._x_black_inf = self._sizing.black_width / 2
self._x_black_sup = self._x_sup - self._x_black_inf
self._y_black = self._y_sup - self._sizing.black_length
self._lower_corner = Vector2D(self._x_inf, self._y_inf)
self._upper_corner = Vector2D(self._x_sup, self._y_sup)
x_center = (self._x_sup + self._x_inf) / 2
y_center = (self._y_black + self._y_inf) / 2
self._lower_point = Vector2D(x_center, self._y_inf)
self._upper_point = Vector2D(x_center, self._y_sup)
self._center = Vector2D(x_center, y_center)
##############################################
@property
def lower_corner(self):
return self._lower_corner
@property
def upper_corner(self):
return self._upper_corner
@property
def center(self):
return self._center
####################################################################################################
class WhiteLeftKeyGeometry(WhiteKeyGeometry):
##############################################
def __init__(self, sizing):
super().__init__(sizing)
self._path = Polyline(
(self._x_inf, self._y_inf),
(self._x_inf, self._y_black),
(self._x_black_inf, self._y_black),
(self._x_black_inf, self._y_sup),
(self._x_sup, self._y_sup),
(self._x_sup, self._y_inf),
)
####################################################################################################
class WhiteCenterKeyGeometry(WhiteKeyGeometry):
##############################################
def __init__(self, sizing):
super().__init__(sizing)
self._path = Polyline(
(self._x_inf, self._y_inf),
(self._x_inf, self._y_black),
(self._x_black_inf, self._y_black),
(self._x_black_inf, self._y_sup),
(self._x_black_sup, self._y_sup),
(self._x_black_sup, self._y_black),
(self._x_sup, self._y_black),
(self._x_sup, self._y_inf),
)
####################################################################################################
class WhiteRightKeyGeometry(WhiteKeyGeometry):
##############################################
def __init__(self, sizing):
super().__init__(sizing)
self._path = Polyline(
(self._x_inf, self._y_inf),
(self._x_inf, self._y_sup),
(self._x_black_sup, self._y_sup),
(self._x_black_sup, self._y_black),
(self._x_sup, self._y_black),
(self._x_sup, self._y_inf),
)
####################################################################################################
class WhiteFullKeyGeometry(WhiteKeyGeometry):
##############################################
def __init__(self, sizing):
super().__init__(sizing)
self._path = Polyline(
(self._x_inf, self._y_inf),
(self._x_inf, self._y_sup),
(self._x_sup, self._y_sup),
(self._x_sup, self._y_inf),
)
####################################################################################################
class Key:
##############################################
def __init__(self, key_number, pitch, geometry, transformation):
self._number = key_number
self._pitch = pitch
self._geometry = geometry
self._transformation = transformation
##############################################
@property
def number(self):
return self._number
@property
def pitch(self):
return self._pitch
@property
def is_black(self):
return self._pitch.is_altered
@property
def geometry(self):
return self._geometry
@property
def transformation(self):
return self._transformation
@property
def transformed_path(self):
return self._transformation * self._geometry.path
##############################################
def _transform_point(self, point):
return Vector2D(self._transformation * point)
##############################################
@property
def lower_corner(self):
return self._transform_point(self._geometry.lower_corner)
@property
def upper_corner(self):
return self._transform_point(self._geometry.upper_corner)
@property
def lower_point(self):
return self._transform_point(self._geometry.lower_point)
@property
def upper_point(self):
return self._transform_point(self._geometry.upper_point)
@property
def center(self):
return self._transform_point(self._geometry.center)
####################################################################################################
class KeyboardGeometry:
##############################################
def __init__(self, sizing=KeyboardSizing(), first_pitch='A0', last_pitch='C8'):
self._sizing = sizing
self._first_pitch = first_pitch
self._last_pitch = last_pitch
self._black_geometry = BlackKeyGeometry(sizing)
self._white_left_geometry = WhiteLeftKeyGeometry(sizing)
self._white_center_geometry = WhiteCenterKeyGeometry(sizing)
self._white_right_geometry = WhiteRightKeyGeometry(sizing)
self._white_full_geometry = WhiteFullKeyGeometry(sizing)
self._keys = []
pitches = [pitch for pitch in Pitch(self._first_pitch).pitch_iterator(Pitch(self._last_pitch))]
last_pitch_index = len(pitches) -1
position = 0
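        # walk the pitches left to right: black keys use the black geometry, the
        # last key gets a full (uncut) white shape, and every other white key is
        # notched on the left, right or both sides depending on whether its
        # neighbours are black keys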
for i, pitch in enumerate(pitches):
if pitch.is_altered:
geometry = self._black_geometry
elif i == last_pitch_index:
geometry = self._white_full_geometry
else:
if i > 0:
prev_is_black = pitches[i-1].is_altered
else:
prev_is_black = False
if i < last_pitch_index:
next_is_black = pitches[i+1].is_altered
else:
next_is_black = False
if next_is_black:
if prev_is_black:
geometry = self._white_center_geometry
else:
geometry = self._white_right_geometry
else:
geometry = self._white_left_geometry
offset = Vector2D(position*self._sizing.white_width, 0)
translation = AffineTransformation2D.Translation(offset)
key = Key(i +1, pitch, geometry, translation)
self._keys.append(key)
if not pitch.is_altered:
position += 1
##############################################
@property
def key_length(self):
return self._sizing.white_length
##############################################
def key_length_offset(self, ratio):
return Vector2D(0, self._sizing.white_length * ratio / 100)
##############################################
def __iter__(self):
return iter(self._keys)
##############################################
def __getitem__(self, slice_):
return self._keys[slice_]
####################################################################################################
class Keyboard(TikzFigure):
##############################################
def __init__(self,
first_pitch,
last_pitch,
style,
):
super().__init__(options='x=.1mm,y=.1mm')
self.set_main_font('Latin Modern Sans') # Roman
self.font_size(4)
geometry = KeyboardGeometry(first_pitch=first_pitch, last_pitch=last_pitch)
for key in geometry:
pitch = key.pitch
kwargs = dict(close=True)
if key.is_black:
kwargs['fill'] = 'black'
kwargs['draw'] = 'black'
else:
kwargs['fill'] = 'white'
kwargs['draw'] = 'black'
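            # a style entry keyed by the exact pitch name (e.g. 'C4') takes
            # precedence over one keyed by the bare step (e.g. 'C')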
for pitch_key in (pitch.full_name, pitch.step):
if pitch_key in style:
kwargs.update(style[pitch_key])
break
self.path(key.transformed_path, **kwargs)
if key.number == 44:
self.line(key.upper_corner, key.upper_corner + geometry.key_length_offset(20))
if key.is_black:
frequency_point = key.lower_point - geometry.key_length_offset(30)
else:
position = key.center + geometry.key_length_offset(key.pitch.degree)
self.text(position, pitch.full_name)
self.text(position - geometry.key_length_offset(5), pitch.french_locale.name, anchor='north')
self.text(key.upper_point, key.number, anchor='south')
frequency_point = key.lower_point
frequency = int(round(pitch.frequency))
# frequency = '{:.1f}'.format(pitch.frequency)
self.text(frequency_point, frequency, anchor='east', rotate=90)
####################################################################################################
class FullKeyboard(Keyboard):
##############################################
def __init__(self):
super().__init__(
first_pitch='A0',
last_pitch='C8',
style={
'C4': {'fill':'red!10'},
'A4': {'fill':'red!10'},
'C': {'fill':'blue!10'},
# 'D': {'fill':'red!10'},
# 'E': {'fill':'red!10'},
# 'F': {'fill':'red!10'},
# 'G': {'fill':'green!10'},
# 'A': {'fill':'green!10'},
# 'B': {'fill':'green!10'},
}
)
| FabriceSalvaire/Musica | Musica/Figure/Keyboard.py | Python | gpl-3.0 | 16,175 |
#!/usr/bin/python
#
# Copyright (C) 2011, Nokia Corporation <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
"""
Tests trying to simulate the behaviour of applications working with tracker
"""
import sys,os,dbus
import unittest
import time
import random
import string
import datetime
import shutil
import fcntl
from common.utils import configuration as cfg
import unittest2 as ut
from common.utils.applicationstest import CommonTrackerApplicationTest as CommonTrackerApplicationTest
from common.utils.helpers import log
MINER_FS_IDLE_TIMEOUT = 30
class TrackerCameraPicturesApplicationTests (CommonTrackerApplicationTest):
def test_01_camera_picture (self):
"""
Camera simulation:
1. Create resource in the store for the new file
2. Write the file
3. Wait for miner-fs to index it
4. Ensure no duplicates are found
"""
fileurn = "tracker://test_camera_picture_01/" + str(random.randint (0,100))
origin_filepath = os.path.join (self.get_data_dir (), self.get_test_image ())
dest_filepath = os.path.join (self.get_dest_dir (), self.get_test_image ())
dest_fileuri = "file://" + dest_filepath
# Insert new resource in the store, including nie:mimeType and nie:url
insert = """
INSERT { <%s> a nie:InformationElement,
nie:DataObject,
nfo:Image,
nfo:Media,
nfo:Visual,
nmm:Photo
}
DELETE { <%s> nie:mimeType ?_1 }
WHERE { <%s> nie:mimeType ?_1 }
INSERT { <%s> a rdfs:Resource ;
nie:mimeType \"image/jpeg\"
}
DELETE { <%s> nie:url ?_2 }
WHERE { <%s> nie:url ?_2 }
INSERT { <%s> a rdfs:Resource ;
nie:url \"%s\"
}
""" % (fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, dest_fileuri)
self.tracker.update (insert)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# Copy the image to the dest path
self.slowcopy_file (origin_filepath, dest_filepath)
assert os.path.exists (dest_filepath)
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# Clean the new file so the test directory is as before
log ("Remove and wait")
os.remove (dest_filepath)
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 0)
def test_02_camera_picture_geolocation (self):
"""
Camera simulation:
1. Create resource in the store for the new file
2. Set nlo:location
2. Write the file
3. Wait for miner-fs to index it
4. Ensure no duplicates are found
"""
fileurn = "tracker://test_camera_picture_02/" + str(random.randint (0,100))
dest_filepath = os.path.join (self.get_dest_dir (), self.get_test_image ())
dest_fileuri = "file://" + dest_filepath
geolocationurn = "tracker://test_camera_picture_02_geolocation/" + str(random.randint (0,100))
postaladdressurn = "tracker://test_camera_picture_02_postaladdress/" + str(random.randint (0,100))
# Insert new resource in the store, including nie:mimeType and nie:url
insert = """
INSERT { <%s> a nie:InformationElement,
nie:DataObject,
nfo:Image,
nfo:Media,
nfo:Visual,
nmm:Photo
}
DELETE { <%s> nie:mimeType ?_1 }
WHERE { <%s> nie:mimeType ?_1 }
INSERT { <%s> a rdfs:Resource ;
nie:mimeType \"image/jpeg\"
}
DELETE { <%s> nie:url ?_2 }
WHERE { <%s> nie:url ?_2 }
INSERT { <%s> a rdfs:Resource ;
nie:url \"%s\"
}
""" % (fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, dest_fileuri)
self.tracker.update (insert)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# FIRST, open the file for writing, and just write some garbage, to simulate that
# we already started recording the video...
fdest = open (dest_filepath, 'wb')
fdest.write ("some garbage written here")
fdest.write ("to simulate we're recording something...")
fdest.seek (0)
# SECOND, set slo:location
location_insert = """
INSERT { <%s> a nco:PostalAddress ;
nco:country \"SPAIN\" ;
nco:locality \"Tres Cantos\"
}
INSERT { <%s> a slo:GeoLocation ;
slo:postalAddress <%s>
}
INSERT { <%s> a rdfs:Resource ;
slo:location <%s>
}
""" % (postaladdressurn, geolocationurn, postaladdressurn, fileurn, geolocationurn)
self.tracker.update (location_insert)
#THIRD, start copying the image to the dest path
original_file = os.path.join (self.get_data_dir (),self.get_test_image ())
self.slowcopy_file_fd (original_file, fdest)
fdest.close ()
assert os.path.exists (dest_filepath)
# FOURTH, ensure we have only 1 resource
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# Clean the new file so the test directory is as before
log ("Remove and wait")
os.remove (dest_filepath)
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 0)
class TrackerCameraVideosApplicationTests (CommonTrackerApplicationTest):
def test_01_camera_video (self):
"""
Camera video recording simulation:
1. Create resource in the store for the new file
2. Write the file
3. Wait for miner-fs to index it
4. Ensure no duplicates are found
"""
fileurn = "tracker://test_camera_video_01/" + str(random.randint (0,100))
origin_filepath = os.path.join (self.get_data_dir (), self.get_test_video ())
dest_filepath = os.path.join (self.get_dest_dir (), self.get_test_video ())
dest_fileuri = "file://" + dest_filepath
# Insert new resource in the store, including nie:mimeType and nie:url
insert = """
INSERT { <%s> a nie:InformationElement,
nie:DataObject,
nfo:Video,
nfo:Media,
nfo:Visual,
nmm:Video
}
DELETE { <%s> nie:mimeType ?_1 }
WHERE { <%s> nie:mimeType ?_1 }
INSERT { <%s> a rdfs:Resource ;
nie:mimeType \"video/mp4\"
}
DELETE { <%s> nie:url ?_2 }
WHERE { <%s> nie:url ?_2 }
INSERT { <%s> a rdfs:Resource ;
nie:url \"%s\"
}
""" % (fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, dest_fileuri)
self.tracker.update (insert)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# Copy the image to the dest path
self.slowcopy_file (origin_filepath, dest_filepath)
assert os.path.exists (dest_filepath)
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# Clean the new file so the test directory is as before
log ("Remove and wait")
os.remove (dest_filepath)
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 0)
def test_02_camera_video_geolocation (self):
"""
Camera simulation:
1. Create resource in the store for the new file
2. Set nlo:location
2. Write the file
3. Wait for miner-fs to index it
4. Ensure no duplicates are found
"""
fileurn = "tracker://test_camera_video_02/" + str(random.randint (0,100))
origin_filepath = os.path.join (self.get_data_dir (), self.get_test_video ())
dest_filepath = os.path.join (self.get_dest_dir (), self.get_test_video ())
dest_fileuri = "file://" + dest_filepath
geolocationurn = "tracker://test_camera_video_02_geolocation/" + str(random.randint (0,100))
postaladdressurn = "tracker://test_camera_video_02_postaladdress/" + str(random.randint (0,100))
# Insert new resource in the store, including nie:mimeType and nie:url
insert = """
INSERT { <%s> a nie:InformationElement,
nie:DataObject,
nfo:Video,
nfo:Media,
nfo:Visual,
nmm:Video
}
DELETE { <%s> nie:mimeType ?_1 }
WHERE { <%s> nie:mimeType ?_1 }
INSERT { <%s> a rdfs:Resource ;
nie:mimeType \"video/mp4\"
}
DELETE { <%s> nie:url ?_2 }
WHERE { <%s> nie:url ?_2 }
INSERT { <%s> a rdfs:Resource ;
nie:url \"%s\"
}
""" % (fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, fileurn, dest_fileuri)
self.tracker.update (insert)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# FIRST, open the file for writing, and just write some garbage, to simulate that
# we already started recording the video...
fdest = open (dest_filepath, 'wb')
fdest.write ("some garbage written here")
fdest.write ("to simulate we're recording something...")
fdest.seek (0)
# SECOND, set slo:location
location_insert = """
INSERT { <%s> a nco:PostalAddress ;
nco:country \"SPAIN\" ;
nco:locality \"Tres Cantos\"
}
INSERT { <%s> a slo:GeoLocation ;
slo:postalAddress <%s>
}
INSERT { <%s> a rdfs:Resource ;
slo:location <%s>
}
""" % (postaladdressurn, geolocationurn, postaladdressurn, fileurn, geolocationurn)
self.tracker.update (location_insert)
#THIRD, start copying the image to the dest path
self.slowcopy_file_fd (origin_filepath, fdest)
fdest.close ()
assert os.path.exists (dest_filepath)
# FOURTH, ensure we have only 1 resource
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 1)
# Clean the new file so the test directory is as before
log ("Remove and wait")
os.remove (dest_filepath)
self.system.tracker_miner_fs_wait_for_idle (MINER_FS_IDLE_TIMEOUT)
self.assertEquals (self.get_urn_count_by_url (dest_fileuri), 0)
if __name__ == "__main__":
ut.main()
| slowfranklin/tracker | tests/functional-tests/600-applications-camera.py | Python | gpl-2.0 | 12,057 |
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
class classParameterTest( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
".",
IECore.IntParameter(
name = "result",
description = "",
defaultValue = 0,
)
)
self.parameters().addParameter(
IECore.ClassParameter(
name = "cp",
description = "",
searchPathEnvVar = "IECORE_OP_PATHS",
)
)
def doOperation( self, operands ) :
return IECore.IntData( 1 )
IECore.registerRunTimeTyped( classParameterTest )
| hradec/cortex | test/IECore/ops/classParameterTest/classParameterTest-1.py | Python | bsd-3-clause | 2,272 |
# Generated by Django 2.0.1 on 2018-02-14 13:42
import versatileimagefield.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0009_auto_20180123_1113'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=versatileimagefield.fields.VersatileImageField(blank=True, null=True, upload_to='shop/product/%Y/%m/%d', verbose_name='Image'),
),
migrations.AlterField(
model_name='productcatalog',
name='image',
field=versatileimagefield.fields.VersatileImageField(blank=True, null=True, upload_to='shop/catalog', verbose_name='Image'),
),
migrations.AlterField(
model_name='productimage',
name='image',
field=versatileimagefield.fields.VersatileImageField(upload_to='shop/product_images/%Y/%m/%d', verbose_name='Image'),
),
migrations.AlterField(
model_name='productlabel',
name='image',
field=versatileimagefield.fields.VersatileImageField(blank=True, null=True, upload_to='shop/label', verbose_name='Image'),
),
migrations.AlterField(
model_name='productmanufacturer',
name='image',
field=versatileimagefield.fields.VersatileImageField(blank=True, null=True, upload_to='shop/producer', verbose_name='Image'),
),
migrations.AlterField(
model_name='productsection',
name='image',
field=versatileimagefield.fields.VersatileImageField(blank=True, null=True, upload_to='shop/section', verbose_name='Image'),
),
]
| astrikov-d/dartcms | dartcms/apps/shop/migrations/0010_auto_20180214_1342.py | Python | mit | 1,727 |
#!/usr/bin/env python
'''Asterisk external test suite usage report
Copyright (C) 2016, Digium, Inc.
Scott Griepentrog <[email protected]>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
'''
import sys
import yaml
try:
from yaml import CSafeLoader as MyLoader
except ImportError:
from yaml import SafeLoader as MyLoader
sys.path.append('lib/python')
TESTS_CONFIG = "tests.yaml"
TEST_CONFIG = "test-config.yaml"
class Test:
def __init__(self, path):
self.path = path
self.test_config = load_yaml_config("%s/%s" % (path, TEST_CONFIG))
properties = self.test_config.get('properties', {})
self.tags = properties.get('tags', ['none'])
self.dependencies = [repr(d)
for d in properties.get('dependencies', [])]
test_modules = self.test_config.get('test-modules', {})
test_objects = test_modules.get('test-object', {})
if not isinstance(test_objects, list):
test_objects = [test_objects]
self.test_objects = [obj.get('typename', 'test-run')
for obj in test_objects]
modules = test_modules.get('modules', {})
self.test_modules = [module.get('typename', '-error-')
for module in modules]
class TestSuite:
def __init__(self):
self.tests = self._parse_test_yaml("tests")
def _parse_test_yaml(self, test_dir):
tests = []
config = load_yaml_config("%s/%s" % (test_dir, TESTS_CONFIG))
if not config:
return tests
for t in config["tests"]:
for val in t:
path = "%s/%s" % (test_dir, t[val])
if val == "test":
tests.append(Test(path))
elif val == "dir":
tests += self._parse_test_yaml(path)
return tests
def unique(self, key):
result = set()
for test in self.tests:
result = result.union(getattr(test, key))
result = list(set(result))
result.sort(key=str.lower)
return result
def occurances(self, **kwargs):
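        # count the tests whose listed attribute contains the given value,
        # e.g. occurances(tags='none') -> number of tests carrying the 'none' tag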
match = []
for test in self.tests:
for key, value in kwargs.items():
if value in getattr(test, key):
match.append(test)
continue
return len(match)
def results_for(self, key):
print(key.title() + ":")
things = self.unique(key)
width = max(len(t) for t in things)
results = [(self.occurances(**{key: t}), t) for t in things]
results.sort(key=lambda tup: tup[0], reverse=True)
for (count, name) in results:
print("\t%-*s %5d" % (width, name, count))
print("")
def load_yaml_config(path):
"""Load contents of a YAML config file to a dictionary"""
try:
f = open(path, "r")
except IOError:
# Ignore errors for the optional tests/custom folder.
if path != "tests/custom/tests.yaml":
print("Failed to open %s" % path)
return None
except:
print("Unexpected error: %s" % sys.exc_info()[0])
return None
config = yaml.load(f, Loader=MyLoader)
f.close()
return config
def main(argv=None):
print("Testsuite Module Usage and Coverage Report")
print("")
test_suite = TestSuite()
print("Number of tests:", len(test_suite.tests))
print("")
test_suite.results_for('tags')
test_suite.results_for('test_objects')
test_suite.results_for('test_modules')
test_suite.results_for('dependencies')
if __name__ == "__main__":
sys.exit(main() or 0)
| asterisk/testsuite | usage.py | Python | gpl-2.0 | 3,716 |
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
import boto3
import time
def run(job, logger=None, **kwargs):
resource = kwargs.pop('resources').first()
selection_name = resource.name
region = resource.attributes.get(field__name='aws_region').value
rh_id = resource.attributes.get(field__name='aws_rh_id').value
backup_plan_id = resource.attributes.get(
field__name='backup_plan_id').value.split(',')[0]
selection_id = resource.attributes.get(
field__name='backup_selection_id').value
rh = AWSHandler.objects.get(id=rh_id)
set_progress('Connecting to Amazon backup service')
client = boto3.client('backup',
region_name=region,
aws_access_key_id=rh.serviceaccount,
aws_secret_access_key=rh.servicepasswd
)
set_progress('Deleting AWS Backup selection "{}"'.format(selection_name))
response = client.delete_backup_selection(
BackupPlanId=backup_plan_id,
SelectionId=selection_id,
)
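    # poll get_backup_selection until it raises ResourceNotFoundException,
    # i.e. the backup selection no longer exists on the AWS side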
while True:
try:
response = client.get_backup_selection(
BackupPlanId=backup_plan_id,
SelectionId=selection_id,
)
except client.exceptions.ResourceNotFoundException:
            set_progress('Backup selection successfully deleted')
break
return "SUCCESS", "Backup selection succesfully deleted", ""
| CloudBoltSoftware/cloudbolt-forge | blueprints/aws_backup_selection/delete.py | Python | apache-2.0 | 1,504 |
#!/usr/bin/python
import zerorpc
import os
import re
class SpaceAPI(object):
def updateStatus(self, labOpen, topic):
# Clean up to avoid massive security hole, if firewall fails
labOpenCleaned = re.sub(r'[^a-zA-Z0-9]',' ', labOpen)
print "Set lab open " + str(labOpenCleaned) + " and topic " + topic
os.system("~/updatestatus.sh " + str(labOpenCleaned) + " Foo")
return "Update ok"
s = zerorpc.Server(SpaceAPI())
s.bind("tcp://0.0.0.0:4242")
s.run()
| vranki/pajabot | spaceapi-server/spaceapi-server.py | Python | gpl-3.0 | 469 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import dataset
from importlib import import_module
from . import config
from . import helpers
# Module API
def cli(argv):
# Prepare conf dict
conf = helpers.get_variables(config, lambda x: x.isupper())
# Prepare conn dict
conn = {
'database': dataset.connect(config.DATABASE_URL),
'warehouse': dataset.connect(config.WAREHOUSE_URL),
'explorerdb': dataset.connect(config.EXPLORERDB_URL),
}
# Get and call processor
process = import_module('processors.%s' % argv[1]).process
process(conf, conn, *argv[2:])
if __name__ == '__main__':
cli(sys.argv)
| arthurSena/processors | processors/base/cli.py | Python | mit | 798 |
import typecat.font2img as f2i
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class FontBox(Gtk.FlowBoxChild):
def set_text(self, arg1):
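        # arg1 is overloaded: a str replaces the sample text, an int replaces
        # the font size; either way the preview image is rebuilt below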
if type(arg1) is str:
self.text = arg1
if type(arg1) is int:
self.font_size = arg1
try:
self.box.destroy()
except AttributeError:
pass
self.box = Gtk.Box()
self.box.set_border_width(5)
self.image = Gtk.Image(halign=Gtk.Align.CENTER)
self.font.set_size(self.font_size)
self.image.set_from_pixbuf(f2i.multiline_gtk(self.text, self.font.pilfont, self.size, background=self.bg, foreground=self.fg))
self.box.pack_start(self.image, True, False, 0)
self.frame.add(self.box)
self.show_all()
def __init__(self, font, text="Handgloves", size=(200, 150), font_size=75):
Gtk.FlowBoxChild.__init__(self)
self.frame = Gtk.Frame()
self.set_border_width(5)
self.font = font
self.font_size = int(size[0]/9)
self.font.set_size(self.font_size)
self.text = text
self.size = size
self.title = self.font.name if len(self.font.name) < 30 else self.font.name[:27] + "..."
self.frame.set_label(self.title)
self.frame.set_label_align(.1, 0)
entry = Gtk.Entry()
self.bg = (255, 255, 255)
self.fg = (0, 0, 0)
self.set_text(text)
self.add(self.frame)
| LordPharaoh/typecat | typecat/display/fontbox.py | Python | mit | 1,483 |
"""
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will asign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
pl.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
pl.axis('tight')
pl.show()
| jmargeta/scikit-learn | examples/neighbors/plot_nearest_centroid.py | Python | bsd-3-clause | 1,783 |
"""Configure repos to ignore some attributes, overruling '.gitattributes'.
For non-interactive uses of Git repositories, it can be undesirable to allow
the 'ident string' and other attribute functionality as there are some
edge-cases that may require manual intervention.
Provide support for writing the repo-global '.git/info/attributes' file such
that any enabling of 'ident strings' and some other features via
'.gitattributes' files will be ignored.
Note that this should have no effect on diffs or commits, it only affects the
content of files in the work tree. This content should not be relevant for
static inspection of the source but would be relevant for other uses, e.g.
automated builds.
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlgitx_ignoreattributes
#
# Public Functions:
# is_repo_definitely_ignoring
# ensure_repo_ignoring
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import phlsys_fs
_REPO_ATTRIBUTES_PATH = '.git/info/attributes'
_REPO_ATTRIBUTES_TUPLE = (
'* -ident',
'* -eol',
'* -text',
'* -filter',
)
_REPO_ATTRIBUTES_CONTENT = "\n".join(_REPO_ATTRIBUTES_TUPLE)
def is_repo_definitely_ignoring(repo_path):
repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)
if not os.path.exists(repo_attributes_path):
return False
else:
# check the existing file
content = phlsys_fs.read_text_file(repo_attributes_path)
return content == _REPO_ATTRIBUTES_CONTENT
def ensure_repo_ignoring(repo_path):
if is_repo_definitely_ignoring(repo_path):
# nothing to do
return
repo_attributes_path = os.path.join(repo_path, _REPO_ATTRIBUTES_PATH)
# check that any existing file is compatible with the new contents we will
# write, i.e. it is a subset of the new content
if os.path.exists(repo_attributes_path):
contents = phlsys_fs.read_text_file(repo_attributes_path)
lines = contents.splitlines()
for l in lines:
stripped = l.strip()
if stripped and stripped not in _REPO_ATTRIBUTES_TUPLE:
# we won't try to do any sort of merging, just escalate
raise Exception(
"cannot merge attributes in existing file: {}".format(
repo_attributes_path))
# the file is exactly one of the existing attributes, we can merge
# correctly by overwriting it with our superset of attributes
phlsys_fs.write_text_file(
repo_attributes_path,
_REPO_ATTRIBUTES_CONTENT)
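# Minimal usage sketch (hypothetical path): ensure_repo_ignoring('/path/to/worktree')
# writes '<worktree>/.git/info/attributes' with the attribute lines above, and
# raises if an existing attributes file holds entries it cannot safely subsume.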
# -----------------------------------------------------------------------------
# Copyright (C) 2014-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| kjedruczyk/phabricator-tools | py/phl/phlgitx_ignoreattributes.py | Python | apache-2.0 | 3,718 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class AccountAnalyticAccount(models.Model):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
company_uom_id = fields.Many2one('uom.uom', related='company_id.project_time_mode_id', string="Company UOM", readonly=False)
project_ids = fields.One2many('project.project', 'analytic_account_id', string='Projects')
project_count = fields.Integer("Project Count", compute='_compute_project_count')
@api.multi
@api.depends('project_ids')
def _compute_project_count(self):
project_data = self.env['project.project'].read_group([('analytic_account_id', 'in', self.ids)], ['analytic_account_id'], ['analytic_account_id'])
mapping = {m['analytic_account_id'][0]: m['analytic_account_id_count'] for m in project_data}
for account in self:
account.project_count = mapping.get(account.id, 0)
@api.multi
def unlink(self):
projects = self.env['project.project'].search([('analytic_account_id', 'in', self.ids)])
has_tasks = self.env['project.task'].search_count([('project_id', 'in', projects.ids)])
if has_tasks:
raise UserError(_('Please remove existing tasks in the project linked to the accounts you want to delete.'))
return super(AccountAnalyticAccount, self).unlink()
@api.multi
def action_view_projects(self):
kanban_view_id = self.env.ref('project.view_project_kanban').id
result = {
"type": "ir.actions.act_window",
"res_model": "project.project",
"views": [[kanban_view_id, "kanban"], [False, "form"]],
"domain": [['analytic_account_id', '=', self.id]],
"context": {"create": False},
"name": "Projects",
}
if len(self.project_ids) == 1:
result['views'] = [(False, "form")]
result['res_id'] = self.project_ids.id
return result
| t3dev/odoo | addons/hr_timesheet/models/analytic_account.py | Python | gpl-3.0 | 2,091 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAlsace(RPackage):
"""ALS for the Automatic Chemical Exploration of mixtures.
Alternating Least Squares (or Multivariate Curve Resolution) for
analytical chemical data, in particular hyphenated data where the first
direction is a retention time axis, and the second a spectral axis.
Package builds on the basic als function from the ALS package and adds
functionality for high-throughput analysis, including definition of time
windows, clustering of profiles, retention time correction, etcetera."""
homepage = "https://bioconductor.org/packages/alsace"
git = "https://git.bioconductor.org/packages/alsace.git"
version('1.20.0', commit='47f1cf8daafc864e5e3418009f349ce85d6b0389')
version('1.18.0', commit='c9fc43c7b441de43b14ef1be69926c4c4a566191')
version('1.16.0', commit='5a51a19aeccbba0123222201cb7a228559f29653')
version('1.14.0', commit='aebb13b00eb850f9569391c4c92183b55b70ae89')
version('1.12.0', commit='1364c65bbff05786d05c02799fd44fd57748fae3')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-als', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/r-alsace/package.py | Python | lgpl-2.1 | 1,415 |
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create pipeline
#
leader = vtk.vtkLeaderActor2D()
leader.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
leader.GetPositionCoordinate().SetValue(0.1, 0.1)
leader.GetPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
leader.GetPosition2Coordinate().SetValue(0.75, 0.23)
leader.SetArrowStyleToFilled()
leader.SetLabel("")
leader2 = vtk.vtkLeaderActor2D()
leader2.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
leader2.GetPositionCoordinate().SetValue(0.9, 0.1)
leader2.GetPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
leader2.GetPosition2Coordinate().SetValue(0.75, 0.83)
leader2.SetArrowStyleToOpen()
leader2.SetArrowPlacementToPoint1()
leader2.SetLabel("Leader2")
leader3 = vtk.vtkLeaderActor2D()
leader3.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
leader3.GetPositionCoordinate().SetValue(0.1, 0.9)
leader3.GetPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
leader3.GetPosition2Coordinate().SetValue(0.6, 0.3)
leader3.SetArrowStyleToHollow()
leader3.SetLabel("Leader3")
leader4 = vtk.vtkLeaderActor2D()
leader4.GetPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
leader4.GetPositionCoordinate().SetValue(0.1, 0.75)
leader4.GetPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
leader4.GetPosition2Coordinate().SetValue(0.1, 0.25)
leader4.SetArrowPlacementToNone()
leader4.SetRadius(1.0)
leader4.SetLabel("Leader4")
leader4.AutoLabelOn()
# Create graphics stuff
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(leader)
ren1.AddActor(leader2)
ren1.AddActor(leader3)
ren1.AddActor(leader4)
renWin.SetSize(250, 250)
# render the image
#
renWin.Render()
#iren.Start()
| hlzz/dotfiles | graphics/VTK-7.0.0/Rendering/Core/Testing/Python/TestLeaderActor2D.py | Python | bsd-3-clause | 2,074 |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
def main():
tornado.options.parse_command_line()
application = tornado.web.Application([
(r"/", MainHandler),
])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
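# To try the demo (assuming Tornado is installed), run this module and point a
# client at the configured port, e.g.:
#   python helloworld.py --port=8888
#   curl http://localhost:8888/   ->  "Hello, world"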
| movmov/cc | vendor/tornado/demos/helloworld/helloworld.py | Python | apache-2.0 | 1,223 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Modules used for ETL - quality data import (claims, non-conformities, samplings, actions)
# Modules required:
import os
import xmlrpclib, sys, csv, ConfigParser
from openerp.tools.status_history import status
from datetime import datetime
# -----------------------------------------------------------------------------
# Set up parameters (for connection to Open ERP Database)
# -----------------------------------------------------------------------------
# Startup from config file:
config = ConfigParser.ConfigParser()
file_config = os.path.expanduser('~/ETL/generalfood/openerp.cfg')
config.read([file_config])
dbname = config.get('dbaccess','dbname')
user = config.get('dbaccess','user')
pwd = config.get('dbaccess','pwd')
server = config.get('dbaccess','server')
port = config.get('dbaccess','port') # verify if it's necessary: getint
separator = eval(config.get('dbaccess','separator')) # test
log_only_error = eval(config.get('log','error')) # log only error in function
# Startup from code:
default_error_data = "2014/07/30"
default_product_id = 1921 # for lot creation (acceptation)
default_lot_id = 92710 # ERR
log_file = os.path.expanduser("~/ETL/generalfood/log/%s.txt" % (datetime.now()))
log = open(log_file, 'w')
# -----------------------------------------------------------------------------
# XMLRPC connection
# -----------------------------------------------------------------------------
sock = xmlrpclib.ServerProxy(
'http://%s:%s/xmlrpc/common' % (server, port), allow_none=True)
uid = sock.login(dbname ,user ,pwd)
sock = xmlrpclib.ServerProxy(
'http://%s:%s/xmlrpc/object' % (server, port), allow_none=True)
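# Note: all ORM calls below go through the XML-RPC "object" endpoint with the
# signature sock.execute(dbname, uid, pwd, model, method, *args). A minimal
# sketch of the pattern used throughout this script (values are illustrative only):
#   ids = sock.execute(dbname, uid, pwd, 'res.partner', 'search', [('ref', '=', '06001')])
#   rows = sock.execute(dbname, uid, pwd, 'res.partner', 'read', ids, ['id', 'name'])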
# -----------------------------------------------------------------------------
# Utility function
# -----------------------------------------------------------------------------
def format_string(valore):
try:
valore = valore.decode('cp1252')
except:
tmp = ""
for i in valore:
try:
tmp += i.decode('cp1252')
except:
pass # jump char
valore = tmp
valore = valore.encode('utf-8')
return valore.strip()
def format_date(valore,date=True):
    ''' Format a date value into the PostgreSQL date format (YYYY-MM-DD)
    '''
try:
if date:
mga = valore.strip().split(' ')[0].split('/') # only date (not time)
year = int(mga[2])
if year < 100:
if year > 50:
year += 1900
else:
year += 2000
return '%4d-%02d-%02d' % (year, int(mga[0]), int(mga[1]))
except:
return False
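# Example (the source files are assumed to use month/day/year dates):
#   format_date('7/30/14')          -> '2014-07-30'
#   format_date('07/30/2014 12:00') -> '2014-07-30'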
def format_currency(valore):
    ''' Format a currency value into a float
    '''
try:
return float(valore.strip().split(' ')[-1].replace(',','.'))
except:
return 0.0
def format_boolean(value):
    ''' Convert the strings '0' and '1' into the booleans False and True
    '''
return value == '1'
def log_event(*event):
    ''' Log the event to file and also report it with print
    '''
if log_only_error and event[0][:5] == "[INFO":
return
log.write("%s. %s\r\n" % (datetime.now(), event))
print event
return
def create_partner(partner_code, type_of_partner, default_dict):
''' Create simple element for partner not found
(write after in default_dict new element)
'''
try:
field = "sql_%s_code" % type_of_partner
partner_ids = sock.execute(dbname, uid, pwd, "res.partner", "search",
[(field, '=', partner_code)])
if partner_ids:
partner_id = partner_ids[0]
else:
data = {
'name': "Partner %s (from migration)" % (partner_code),
field: partner_code,
'sql_import': True,
}
if type_of_partner == 'customer':
data['ref'] = partner_code
data['customer'] = True
elif type_of_partner == 'supplier':
data['supplier'] = True
elif type_of_partner == 'destination':
data['is_address'] = True
partner_id = sock.execute(dbname, uid, pwd, "res.partner",
'create', data)
log_event("[WARN] %s partner created: %s" % (type_of_partner, partner_code))
default_dict[partner_code] = partner_id
return partner_id
except:
log_event("[ERROR] Error creating %s partner: %s" % (type_of_partner, partner_code))
return False
def get_or_create_partner(partner_code, type_of_partner, mandatory, res_partner_customer, res_partner_supplier):
''' Try to get partner element or create a simple element if not present
'''
if type_of_partner == 'customer':
default_dict = res_partner_customer
elif type_of_partner == 'supplier':
default_dict = res_partner_supplier
elif type_of_partner == 'destination':
default_dict = res_partner_customer # search in customer dict
else:
default_dict = {} # nothing
partner_id = default_dict.get(partner_code, False)
if not partner_id: # create e simple element
partner_id = create_partner(partner_code, type_of_partner, default_dict)
if mandatory and not partner_id:
log_event("[ERROR] %s partner not found: %s" % (
type_of_partner, partner_code))
return partner_id
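# Typical use in the import loops below: resolve a partner code coming from the
# Access export to an OpenERP id, creating a stub partner when it is missing,
# e.g. (the code '06001' is only an illustration of a customer code):
#   partner_id = get_or_create_partner('06001', 'customer', True,
#       res_partner_customer, res_partner_supplier)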
# -----------------------------------------------------------------------------
# Supplier qualification mapping
# -----------------------------------------------------------------------------
qualifications = {
    '1': 'full', # Full qualification
    '2': 'reserve', # With reserve
    '3': 'discarded', # Discarded
    '4': 'uneventful', # Not moved (no activity)
    '5': 'test', # On trial
    '6': 'occasional', # Occasional
}
# -----------------------------------------------------------------------------
# Communication type mapping
# -----------------------------------------------------------------------------
comunications = {
    '1': 1, # Customer
    '2': 2, # Supplier
    '3': 3, # ASL (local health authority)
}
# -----------------------------------------------------------------------------
# Gravity (severity) mapping
# -----------------------------------------------------------------------------
gravity = {
    '1': 2, # Serious
    '2': 3, # Important
    '3': 1, # Secondary
}
# -----------------------------------------------------------------------------
# Origin mapping
# -----------------------------------------------------------------------------
origin = {
    '1': 1, # Order
    '2': 2, # Warehouse
    '3': 3, # Supplier
    '4': 4, # Customer
    '5': 5, # Transport
    '6': 6, # Invoicing
    '7': 7, # Not definable
    '8': 8, # Commercial
    '9': 9, # Logistics
    '10': 10, # Packaging
    '11': 11, # Purchasing
}
# -----------------------------------------------------------------------------
# Cause mapping
# -----------------------------------------------------------------------------
cause = {
    '1': 1, # Hygiene
    '2': 2, # Quality
    '3': 3, # Quantity
    '4': 4, # Delay
    '5': 5, # Wrong product
    '6': 6, # Packaging
    '7': 7, # Customer error
    '8': 8, # Price
    '9': 9, # Not definable
    '10': 10, # Glazing
    '11': 11, # Temperature
    '12': 12, # Size/grading
    '13': 13, # Foreign bodies/contaminated
    '14': 14, # Product missing vs. delivery note
    '15': 15, # Stock-out
}
# -----------------------------------------------------------------------------
# Sampling plan mapping
# -----------------------------------------------------------------------------
plan = {
'1': 1, # Bieta erbetta
'3': 2, # Broccoli calabri IGF
'4': 3, # Carote Baby e rondelle
'6': 4, # Cavolfiore
'7': 5, # Carciofi
'9': 6, # Patate crocchette
'11': 7, # Fagiolini
'12': 8, # Finocchi
'13': 9, # Minestrone
'16': 10, # Patate
'18': 11, # Piselli
'19': 12, # Spinaci
'20': 13, # Zucchine
'21': 14, # Halibut
'22': 15, # Bastoncini
'23': 16, # Calamari
'25': 17, # Cozze
'26': 18, # Merluzzo
'27': 19, # Palombo
'28': 20, # Platessa
'29': 21, # Seppie
'30': 22, # Trota
'31': 23, # Coscette pollo
'32': 24, # Pollo
'33': 25, # Suino
'35': 26, # Peperoni
'38': 27, # Tacchino
'39': 28, # Asparagi
'40': 29, # Macinato
'41': 30, # Pesce spada
'42': 31, # Mais
'43': 32, # Pangasio
'44': 33, # Aromi e sedano
}
# -----------------------------------------------------------------------------
# Origin mapping (actions) >> (the same registry is also used for samplings)
# -----------------------------------------------------------------------------
origin_action = {
    '1': 'direction', # Management review
    '2': 'audit', # Internal audit
    '3': 'claim', # Claim
    '4': 'nc', # Non-conformity report
    '5': 'other', # Other
}
stock_production_lot = {}
lot_ids = sock.execute(dbname, uid, pwd, 'stock.production.lot', 'search', [])
for lot in sock.execute(dbname, uid, pwd, 'stock.production.lot', 'read', lot_ids, ['id','name']):
stock_production_lot[lot['name']] = lot['id']
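# stock_production_lot now maps lot name -> id, so the import loops below can
# resolve lot codes with a plain dict lookup instead of one search() call per row.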
# -----------------------------------------------------------------------------
# Supplier class import
# -----------------------------------------------------------------------------
only_create = True
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Classi.txt')
openerp_object = 'quality.partner.class'
log_event("Start import %s" % openerp_object)
quality_partner_class = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
continue
if len(line):
access_id = line[0]
name = format_string(line[1])
# Start of importation:
counter['tot'] += 1
# test if record exists (basing on Ref. as code of Partner)
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'name': name,
'access_id': access_id,
}
if item: # already exist
counter['upd'] += 1
try:
if only_create:
log_event("[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
name)
else:
item_mod = sock.execute(dbname, uid, pwd,
openerp_object, 'write', item, data)
log_event(
"[INFO]", counter['tot'], "Write", openerp_object,
name)
quality_partner_class[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else: # new
counter['new'] += 1
try:
openerp_id=sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create", openerp_object, name)
quality_partner_class[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
    raise # Exception("Import error!") # re-raise so the error is visible for debugging
store = status(openerp_object)
if jump_because_imported:
quality_partner_class = store.load()
else:
store.store(quality_partner_class)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# Customer import
# -----------------------------------------------------------------------------
only_create = True
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Clienti.txt')
openerp_object = 'res.partner'
log_event("Start import %s (customer)" % openerp_object)
res_partner_customer = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
continue
if len(line):
access_c_id = line[0]
code = format_string(line[1])
name = format_string(line[2])
# Start of importation:
counter['tot'] += 1
# test if record exists (basing on Ref. as code of Partner)
if code[:2] == '06':
search_key = 'sql_customer_code'
destination = False
else:
search_key = 'sql_destination_code'
destination = True
item = sock.execute(
dbname, uid, pwd, openerp_object , 'search', [
#('access_c_id', '=', access_c_id),
(search_key, '=', code),
])
if not item:
log_event(
"[WARNING] Customer/Destination not found "
"(must be yet imported)", data, )
# continue # TODO lo creo lo stesso per ora
data = {
'name': "%s%s" % (name, "" if item else " [*]"), # Creato da importazione)
'is_company': True,
'access_c_id': access_c_id,
'customer': True,
# for link sql importation
search_key: code, #'sql_customer_code'
'sql_import': True,
}
if destination:
data['is_address'] = True
# parent_id = ?? TODO
if item:
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "No Write", openerp_object,
" (jumped only_create clause: ", code)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write', item,
data)
log_event(
"[INFO]", counter['tot'], "Write", openerp_object,
code)
res_partner_customer[code] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else:
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create", openerp_object,
code)
res_partner_customer[code] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
raise
store = status('%sc' % openerp_object)
if jump_because_imported:
res_partner_customer = store.load()
else:
store.store(res_partner_customer)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# Supplier import
# -----------------------------------------------------------------------------
only_create = True
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Fornitori.txt')
openerp_object = 'res.partner'
log_event("Start import %s (supplier)" % openerp_object)
res_partner_supplier = {}
lines = csv.reader(open(file_input,'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped: %s" % (
counter['tot'], max_col, len(line), line))
continue
access_s_id = line[0]
code = format_string(line[1])
name = format_string(line[2])
quality_class_code = format_string(line[3])
quality_activity = format_string(line[11])
quality_product = format_string(line[12])
quality_rating_info = format_string(line[13])
quality_commercial_reference = format_string(line[14])
quality_update_date = format_date(line[15])
quality_start_supplier = format_date(line[33])
quality_end_supplier = format_date(line[34])
quality_class_id = quality_partner_class.get(
quality_class_code, False)
# Start of importation:
counter['tot'] += 1
# test if record exists (basing on Ref. as code of Partner)
item = sock.execute(
dbname, uid, pwd, openerp_object , 'search', [
#('access_s_id', '=', access_s_id),
('sql_supplier_code', '=', code),
])
if not item:
log_event(
"[WARNING] Supplier not found (must be yet imported)",
data, )
#continue
data = {
'name': name,
'is_company': True,
'access_s_id': access_s_id,
'supplier': True,
'quality_class_id': quality_class_id,
'quality_activity': quality_activity,
'quality_product': quality_product,
'quality_rating_info': quality_rating_info,
'quality_commercial_reference': quality_commercial_reference,
'quality_update_date': quality_update_date,
'quality_start_supplier': quality_start_supplier,
'quality_end_supplier': quality_end_supplier,
# for link sql importation
'sql_supplier_code': code,
'sql_import': True,
}
if item:
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write", openerp_object,
" (jumped only_create clause: ", code)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write', item,
data)
log_event(
"[INFO]", counter['tot'], "Write", openerp_object,
code)
#res_partner_supplier[access_s_id] = item[0]
res_partner_supplier[code] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else:
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create", openerp_object,
code)
#res_partner_supplier[access_s_id] = openerp_id
res_partner_supplier[code] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
raise
store = status('%ss' % openerp_object)
if jump_because_imported:
res_partner_supplier = store.load()
else:
store.store(res_partner_supplier)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# Supplier qualification import
# -----------------------------------------------------------------------------
only_create = True
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Qualifiche.txt')
openerp_object = 'quality.supplier.rating'
log_event("Start import %s" % openerp_object)
# Not cached in a dict
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
counter['tot'] += 1
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped:" % (
counter['tot'], max_col, len(line)))
continue
access_id = line[0]
supplier_code = format_string(line[1])
qualification_code = format_string(line[2])
name = format_string(line[3])
date = format_date(line[4])
type_code = format_string(line[5]).upper()
deadline = format_date(line[6])
obsolete = format_boolean(line[7])
# Convert foreign key:
if type_code == "P":
type_id = 'first'
elif type_code == 'R':
type_id = 'renewal'
else:
type_id = False
partner_id = res_partner_supplier.get(supplier_code, False)
            if not partner_id: # create it if it does not exist
partner_id = get_or_create_partner(supplier_code,
'supplier', True, res_partner_customer,
res_partner_supplier)
if not partner_id:
log_event("[ERROR] Partner not found, jumped! %s" % (line))
continue
qualification = qualifications.get(qualification_code, False)
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'name': name,
'date': date,
'type': type_id,
'deadline': deadline,
'obsolete': obsolete,
'qualification': qualification,
'partner_id': partner_id,
'access_id': access_id,
}
if item:
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
supplier_code)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, supplier_code)
#quality_claim[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else:
counter['new'] += 1
try:
openerp_id=sock.execute(
dbname, uid, pwd, openerp_object, 'create',
data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, name)
#quality_claim[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
raise
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# Supplier certifications
# -----------------------------------------------------------------------------
only_create = True
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Certificazioni.txt')
openerp_object = 'quality.supplier.certification'
log_event("Start import %s" % openerp_object)
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
counter['tot'] += 1
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped:" % (
counter['tot'], max_col, len(line)))
continue
access_id = line[0]
supplier_code = format_string(line[1])
entity = format_date(line[2])
rule = format_string(line[3])
note = format_string(line[4]) # purpose
date = format_date(line[5])
deadline = format_date(line[6])
number = format_string(line[7])
# Convert foreign key:
partner_id = res_partner_supplier.get(supplier_code, False)
if not partner_id:
partner_id = get_or_create_partner(supplier_code,
'supplier', True, res_partner_customer,
res_partner_supplier)
if not partner_id:
log_event("[ERROR] Partner not found, jumped! %s" % (line))
continue
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'date': date,
'entity': entity,
                # 'name': # TODO does this field exist???
'deadline': deadline,
'note': note,
'rule': rule,
'number': number,
'partner_id': partner_id,
'access_id': access_id,
}
if item:
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
supplier_code)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, supplier_code)
#quality_claim[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else:
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, supplier_code)
#quality_claim[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
raise
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# References - supplier qualification history
# -----------------------------------------------------------------------------
only_create = True
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Andamenti.txt')
openerp_object = 'quality.supplier.reference'
log_event("Start import %s" % openerp_object)
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
counter['tot'] += 1
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s now %s! Jumped:" % (
counter['tot'], max_col, len(line)))
continue
access_id = line[0]
supplier_code = format_string(line[1])
date = format_date(line[2])
note = format_string(line[3])
# Convert foreign key:
partner_id = res_partner_supplier.get(supplier_code, False)
if not partner_id:
partner_id = get_or_create_partner(supplier_code,
'supplier', True, res_partner_customer,
res_partner_supplier)
if not partner_id:
log_event("[ERROR] Partner not found, jumped! %s" % (line))
continue
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
                #'name': name, # TODO the field does not exist!!!
'date': date,
'note': note,
'partner_id': partner_id,
'access_id': access_id,
}
if item:
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
supplier_code)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, supplier_code)
#quality_claim[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else:
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, supplier_code)
#quality_claim[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
raise
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# Supplier checks
# -----------------------------------------------------------------------------
only_create = True
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Verifiche.txt')
openerp_object = 'quality.supplier.check'
log_event("Start import %s" % openerp_object)
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
counter['tot'] += 1
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped:" % (
counter['tot'], max_col, len(line)))
continue
access_id = line[0]
supplier_code = format_string(line[1])
date = format_date(line[2])
name = format_string(line[3])
note = format_string(line[4])
# Convert foreign key:
partner_id = res_partner_supplier.get(supplier_code, False)
if not partner_id:
partner_id = get_or_create_partner(supplier_code,
'supplier', True, res_partner_customer,
res_partner_supplier)
if not partner_id:
log_event("[ERROR] Partner not found, jumped! %s" % (line))
continue
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'date': date,
'name': name,
'note': note,
'partner_id': partner_id,
'access_id': access_id,
}
if item:
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
supplier_code)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, supplier_code)
#quality_claim[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else:
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, supplier_code)
#quality_claim[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
raise
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# PRELOADS
# -----------------------------------------------------------------------------
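# The preload passes below create minimal stub records (claims, non-conformities,
# samplings and actions) keyed by their Access id, so that the full import passes
# further down can resolve cross-references between these documents regardless of
# import order. Each pass saves its access_id -> OpenERP id map via the status helper.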
# CLAIMS (Reclami) -------------------------------------------------------------
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Rapporti.txt')
openerp_object = 'quality.claim'
log_event("Start preload import %s" % openerp_object)
quality_claim = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
if not jump_because_imported:
try:
for line in lines:
counter['tot'] += 1
if counter['tot'] <= 0:
continue
if len(line):
access_id = line[0]
ref = "REC%05d" % (int(format_string(line[1]) or '0'))
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'ref': ref,
'name': ref, # TODO not correct
'access_id': access_id,
'partner_id': 1, # TODO not correct
}
if item:
quality_claim[access_id] = item[0]
else:
try:
quality_claim[access_id] = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event("[INFO] %s. Create %s ref: %s" % (
counter['tot'], openerp_object, ref))
except:
log_event(
"[ERROR] Error creating, record: %s " % line)
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_claim = store.load()
else:
store.store(quality_claim)
log_event("Total %(tot)s" % counter)
# NON-CONFORMITIES (Conformità) ------------------------------------------------
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Conformità.txt')
openerp_object = 'quality.conformed'
log_event("Start preload import %s" % openerp_object)
quality_conformed = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
if not jump_because_imported:
try:
for line in lines:
try:
counter['tot'] += 1
if counter['tot'] <= 0:
max_col = len(line)
continue
if len(line):
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped: %s" % (
counter['tot'], max_col, len(line), counter['tot']))
continue
access_id = line[0]
ref = "NC%05d" % (int(format_string(line[4]) or '0'))
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'ref': ref,
'access_id': access_id,
                        'gravity_id': 2, # TODO to be fixed
}
if item:
quality_conformed[access_id] = item[0]
else:
try:
quality_conformed[access_id] = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event("[INFO] %s. Create %s ref: %s" % (
counter['tot'], openerp_object, ref))
except:
log_event(
"[ERROR] Error creating, record: %s " % line)
except:
log_event('[ERROR] %s. Error importing data: %s' % (counter['tot'], sys.exc_info()))
continue
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_conformed = store.load()
else:
store.store(quality_conformed)
log_event("Total %(tot)s" % counter)
# SAMPLINGS (Campionamenti) ----------------------------------------------------
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Campionatura.txt')
openerp_object = 'quality.sampling'
log_event("Start preload import %s" % openerp_object)
quality_sampling = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
if not jump_because_imported:
try:
for line in lines:
counter['tot'] += 1
if counter['tot'] <= 0:
max_col = len(line)
continue
if len(line):
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped: %s" % (
counter['tot'], max_col, len(line), counter['tot']))
continue
access_id = line[0]
ref = "SAM%05d" % (int(format_string(line[4]) or '0'))
fake_lot = 91131
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'ref': ref,
'access_id': access_id,
'date': '2014-06-25',
'lot_id': fake_lot,
}
if item:
quality_sampling[access_id] = item[0]
else:
try:
quality_sampling[access_id] = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event("[INFO] %s. Create %s ref: %s" % (
counter['tot'], openerp_object, ref))
except:
log_event(
"[ERROR] Error creating, record: %s " % line)
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_sampling = store.load()
else:
store.store(quality_sampling)
log_event("Total %(tot)s" % counter)
# ACTIONS (Azioni) -------------------------------------------------------------
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Azioni.txt')
openerp_object = 'quality.action'
log_event("Start preload import %s" % openerp_object)
quality_action = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
if not jump_because_imported:
try:
for line in lines:
counter['tot'] += 1
if counter['tot'] <= 0:
continue
if len(line):
access_id = line[0]
ref = "ACP%05d" % (int(format_string(line[1]) or '0'))
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'ref': ref,
'access_id': access_id,
}
if item:
quality_action[access_id] = item[0]
else:
try:
quality_action[access_id] = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event("[INFO] %s. Create %s ref: %s" % (
counter['tot'], openerp_object, ref))
except:
log_event(
"[ERROR] Error creating, record: %s " % line)
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_action = store.load()
else:
store.store(quality_action)
log_event("Total %(tot)s" % counter)
# -----------------------------------------------------------------------------
# CLAIMS (Reclami)
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Rapporti.txt')
openerp_object = 'quality.claim'
log_event("Start import %s" % openerp_object)
quality_claim = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
old_claim = False
try:
lot = {1: {}, 2: {}, 3: {}, }
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
continue
if len(line):
access_id = line[0]
name = format_string(line[1])
date = format_date(line[2])
partner_code = format_string(line[3])
partner_ref = format_string(line[6])
receive_user_code = format_string(line[12])
subject = format_string(line[13])
request_return = format_boolean(line[14])
RTR_request = format_boolean(line[16])
analysis = format_string(line[17])
origin_code = format_string(line[36])
cause_code = format_string(line[37])
responsability = format_string(line[38])
solution = format_string(line[39])
gravity_code = format_string(line[40])
need_accredit = format_boolean(line[41])
SFA_saw = format_boolean(line[42])
NC_ref = format_string(line[43])
closed_date = format_date(line[46])
action_code = format_string(line[57])
sampling_code = format_string(line[60])
ref_claim = int(name or '0')
if not old_claim:
old_claim = ref_claim
else:
old_claim += 1
if old_claim != ref_claim:
log_event("[ERROR] old_rec=%s rec_claim=%s (hole in list)" % (
old_claim, ref_claim))
old_claim = ref_claim
ref = "REC%05d" % (ref_claim)
            customer_ref = False # the customer's NC reference code does not exist in the source?
if need_accredit and not NC_ref:
NC_ref = "Nessun riferimento"
lot[1]['lot'] = format_string(line[20])
lot[2]['lot'] = format_string(line[26])
lot[3]['lot'] = format_string(line[32])
lot[1]['product'] = format_string(line[23])
lot[2]['product'] = format_string(line[29])
lot[3]['product'] = format_string(line[35])
lot[1]['supplier'] = format_string(line[21])
lot[2]['supplier'] = format_string(line[27])
lot[3]['supplier'] = format_string(line[33])
lot[1]['date'] = format_date(line[18])
lot[2]['date'] = format_date(line[24])
lot[3]['date'] = format_date(line[30])
lot[1]['qty_return'] = format_currency(line[19])
lot[2]['qty_return'] = format_currency(line[25])
lot[3]['qty_return'] = format_currency(line[31])
receive_user_id = 1
            # Simple lookup tables:
origin_id = origin.get(origin_code, False)
cause_id = cause.get(cause_code, False)
gravity_id = gravity.get(gravity_code, False)
            # Linked documents:
action_id = quality_action.get(action_code, False)
sampling_id = quality_sampling.get(sampling_code, False)
            # Find the partner and, when present, the destination address
partner_id = False
partner_address_id = False
if partner_code[:2] == '06':
partner_id = get_or_create_partner(partner_code, 'customer',
False, res_partner_customer, res_partner_supplier)
elif partner_code[:2] == '07':
partner_address_id = get_or_create_partner(partner_code,
'destination', False, res_partner_customer,
res_partner_supplier)
                partner_id = partner_address_id # TODO look up the partner owning the destination
if not partner_id:
partner_id = 1
log_event("[WARNING] [%s] Correggere il partner, reclamo: %s" % (
ref, partner_code))
# Start of importation:
counter['tot'] += 1
data = {
'name': "%s..." % subject[:50],
'ref': ref,
                'customer_ref': customer_ref, # customer's code for the NC (not available)
'date': date,
'receive_user_id': receive_user_id,
'subject': subject,
'analysis': analysis,
'responsability': responsability,
'solution': solution,
'partner_id': partner_id,
                'partner_ref': partner_ref, # contact person on the customer side
'partner_address_id': partner_address_id,
'request_return': request_return,
'RTR_request': RTR_request,
'NC_ref': NC_ref,
'SFA_saw': SFA_saw,
'origin_id': origin_id,
'cause_id': cause_id,
'gravity_id': gravity_id,
'closed_date': closed_date,
'action_id': action_id,
'sampling_id': sampling_id,
'need_accredit': need_accredit,
'access_id': access_id,
}
# test if record exists (basing on Ref. as code of Partner)
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
if item: # already exist
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
ref)
else:
try:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, ref)
except:
log_event(
"[ERR] %s Write data %s", counter['tot'], data)
quality_claim[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else: # new
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, ref)
quality_claim[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
if action_id:
sock.execute(dbname, uid, pwd, 'quality.action', 'write', action_id, {
'claim_id' : quality_claim[access_id], 'origin': 'claim',
})
if sampling_id:
sock.execute(dbname, uid, pwd, 'quality.sampling', 'write', sampling_id, {
'claim_id' : quality_claim[access_id], 'origin': 'claim',
})
            # NOTE: NCs did not exist in the old program, so their genesis links were not updated
            # Import of the lots
for key in lot:
try:
lot_name = lot[key]['lot'] # number
if lot_name and lot_name != '0':
lot_id = stock_production_lot.get(lot_name)
if not lot_id:
#log_event("[ERROR] No Lot, jump: %s" % lot_name) # no comunication
continue
lot_access_id = '%s%s' % (access_id, key)
data = {
'lot_id': lot_id,
'return_date': lot[key]['date'],
'return_qty': lot[key]['qty_return'],
'claim_id': quality_claim[access_id],
'real_lot_id': lot_id,
'access_id': lot_access_id,
}
lot_id = sock.execute(dbname, uid, pwd,
'quality.claim.product' , 'search', [
('access_id', '=', lot_access_id)])
else:
#log_event("[ERROR] No Lot, jump: %s" % lot_name) # no comunication
continue
except:
log_event("[ERROR] generic error (lot part) %s" % (
sys.exc_info()))
continue
if lot_id: # already exist
try:
sock.execute(
dbname, uid, pwd, 'quality.claim.product', 'write',
lot_id, data)
except:
log_event("[ERROR] Modifing lot %s [%s]" % (
key, data))
else: # new
try:
sock.execute(
dbname, uid, pwd, 'quality.claim.product', 'create', data)
except:
log_event(
"[ERROR] Error creating lot %s [%s]" % (
key, data))
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_claim = store.load()
else:
store.store(quality_claim)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# NON-CONFORMITIES (Conformità)
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Conformità.txt')
openerp_object = 'quality.conformed'
log_event("Start import %s" % openerp_object)
quality_conformed = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
treatment = {
1: {'type': 'accept_exception'},
2: {'type': 'discard'},
3: {'type': 'make_supplier'},
}
comunication = {
1: {'type': 1}, # Customer
2: {'type': 2}, # Supplier
3: {'type': 3}, # ASL
}
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
counter['tot'] += 1
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped: %s" % (
counter['tot'], max_col, len(line), line))
continue
access_id = line[0]
sampling_code = format_string(line[1])
action_code = format_string(line[2])
ref = "NC%05d" % (int(format_string(line[4]) or '0'))
insert_date = format_date(line[5])
quantity = format_boolean(line[6])
sanitation = format_boolean(line[7])
aesthetic_packaging = format_boolean(line[8])
name = format_string(line[9])
            # origin = format_string(line[9]) # TODO (which column?)
#genesis_1 = format_boolean(line[11])
#genesis_2 = format_boolean(line[12])
treatment[1]['treatment'] = format_boolean(line[13])
treatment[2]['treatment'] = format_boolean(line[14])
treatment[3]['treatment'] = format_boolean(line[15])
treatment[1]['qty'] = format_currency(line[18])
treatment[2]['qty'] = format_currency(line[19])
treatment[3]['qty'] = format_currency(line[20])
treatment[1]['note'] = format_string(line[21])
treatment[2]['note'] = format_string(line[22])
treatment[3]['note'] = format_string(line[23])
comunication[1]['comunication'] = format_boolean(line[25]) # Cli
comunication[2]['comunication'] = format_boolean(line[24]) # For
comunication[3]['comunication'] = format_boolean(line[26]) # ASL
comunication[1]['protocol'] = format_string(line[29]) # Cli
comunication[2]['protocol'] = format_string(line[27]) # For
comunication[3]['protocol'] = format_string(line[28]) # ASL
note_RAQ = format_string(line[30])
lot_code = format_string(line[33])
ddt_ref = format_string(line[34])
#genesis_3 = format_boolean(line[36])
cancel = format_boolean(line[37])
stock_note = format_string(line[38])
#genesis_4 = format_boolean(line[39])
gravity_code = format_string(line[40])
sampling_id = quality_sampling.get(sampling_code, False)
action_id = quality_action.get(action_code, False)
            gravity_id = gravity.get(gravity_code, 2) # TODO: change the default
lot_id = stock_production_lot.get(lot_code)
if not lot_id:
log_event("[ERROR] %s Lot not found %s, temp replaced ID=%s" % (
counter['tot'], lot_code, ref))
lot_id = default_lot_id
'''if genesis_1:
genesis = 'acceptance'
elif genesis_2:
genesis = 'sample'
elif genesis_3:
genesis = 'claim'
elif genesis_4:
genesis = 'packaging'
else:
genesis = 'other'
'''
# Start of importation:
# test if record exists (basing on Ref. as code of Partner)
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'name': name,
'ref': ref,
'insert_date': insert_date,
'aesthetic_packaging': aesthetic_packaging,
'quantity': quantity,
'sanitation': sanitation,
'gravity_id': gravity_id,
                #'genesis': genesis, # TODO move everything into the origin field
                #'origin': origin, # TODO derive it at the end
'ddt_ref': ddt_ref,
'lot_id': lot_id,
'note_RAQ': note_RAQ,
'cancel': cancel,
#'claim_id': claim_id,
'sampling_id': sampling_id,
#'acceptation_id': acceptation_id,
'action_id': action_id,
'access_id': access_id,
'stock_note': stock_note,
}
if item: # already exist
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
name)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write', item, data)
log_event(
"[INFO]", counter['tot'], "Write", openerp_object, name)
quality_conformed[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", counter['tot'], data)
continue
else: # new
counter['new'] += 1
try:
openerp_id=sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, name)
quality_conformed[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ",
counter['tot'], data)
continue
if action_id:
sock.execute(dbname, uid, pwd, 'quality.action', 'write', action_id, {
                    'conformed_id' : quality_conformed[access_id], # not parent_
                    'origin': 'nc', # TODO correct?
})
            if sampling_id: # fixed manually
sock.execute(dbname, uid, pwd, 'quality.sampling', 'write', sampling_id, {
'parent_conformed_id' : quality_conformed[access_id],
                    'origin': 'nc', # TODO correct?
})
            # Treatment creation:
for key in treatment:
if treatment[key]['treatment']:
treat_access_id = '%s%s' % (access_id, key)
data = {
'type': treatment[key]['type'],
'name': treatment[key]['note'],
'qty': treatment[key]['qty'],
'conformed_id': quality_conformed[access_id],
'access_id': treat_access_id,
}
treat_id = sock.execute(dbname, uid, pwd, 'quality.treatment' , 'search', [
('access_id', '=', treat_access_id)])
if treat_id: # already exist
try:
sock.execute(
dbname, uid, pwd, 'quality.treatment', 'write',
treat_id, data)
except:
log_event("[ERROR] Modifing treat%s" % key)
else: # new
try:
sock.execute(
dbname, uid, pwd, 'quality.treatment', 'create', data)
except:
log_event(
"[ERROR] Error creating treat%s" % key)
            # Communication creation:
for key in comunication:
if comunication[key]['comunication']:
comunication_access_id = '%s%s' % (access_id, key)
data = {
'type_id': comunication[key]['type'],
'prot_number': comunication[key]['protocol'],
'prot_date': insert_date,
'conformed_id': quality_conformed[access_id],
'access_id': comunication_access_id,
}
comunication_id = sock.execute(dbname, uid, pwd, 'quality.comunication' , 'search', [
('access_id', '=', comunication_access_id)])
if comunication_id: # already exist
try:
sock.execute(
dbname, uid, pwd, 'quality.comunication', 'write',
comunication_id, data)
except:
log_event("[ERROR] Modifing comunication%s" % key)
else: # new
try:
sock.execute(
dbname, uid, pwd, 'quality.comunication', 'create', data)
except:
log_event(
"[ERROR] Error creating comunication%s" % key)
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_conformed = store.load()
else:
store.store(quality_conformed)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# SAMPLINGS (Campionamenti)
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Campionatura.txt')
openerp_object = 'quality.sampling'
log_event("Start import %s" % openerp_object)
quality_sampling = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
sample_passed = []
sample_notpassed = []
tasters = {1: '', 2: '', 3: '', 4: ''}
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jump:" % (
counter['tot'], max_col, len(line)))
continue
access_id = line[0]
closed = format_boolean(line[1]) # closed (sample)
ref = format_string(line[2])
date = format_date(line[3])
lot_code = format_string(line[4])
            # Checkboxes: which examinations to perform:
do_visual = format_boolean(line[8]) # ex 8
do_analysis = format_boolean(line[9]) # ex 10
do_taste = format_boolean(line[10]) # ex 9
do_glazing = format_boolean(line[11]) # ex 11
            # Checkboxes: examination outcomes:
visual_state = format_boolean(line[12]) # ex 12
analysis_state = format_boolean(line[13])# ex 14
taste_state = format_boolean(line[14]) # ex 13
glazing_state = format_boolean(line[15]) # ex 15
            # Examination descriptions:
analysis = format_string(line[16])
taste = format_string(line[17])
visual = format_string(line[18])
weight_glazing = format_currency(line[19])
weight_drained = format_currency(line[20])
perc_glazing_indicated = format_currency(line[21])
perc_glazing_calculated = format_currency(line[22])
            # Tasters:
tasters[1] = format_string(line[23])
tasters[2] = format_string(line[24])
tasters[3] = format_string(line[25])
tasters[4] = format_string(line[26])
passed = format_boolean(line[27]) # passed (sample)
note = format_string(line[29])
conformed_code = format_string(line[36])
cancel = format_boolean(line[38])
sampling_plan_code = format_string(line[39])
ref = "SAM%05d" % (int(ref or '0'))
lot_id = stock_production_lot.get(lot_code, False)
if not lot_id:
log_event("[ERROR] %s Lot not found (replaced with temp raplaced ID=%s) %s" % (
counter['tot'], lot_code, ref))
lot_id = default_lot_id
conformed_id = quality_conformed.get(conformed_code, False)
sampling_plan_id = plan.get(sampling_plan_code, False)
if not date:
date = data.get('date', default_error_data)
# Start of importation:
counter['tot'] += 1
# test if record exists (basing on Ref. as code of Partner)
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'ref': ref,
'date': date,
'lot_id': lot_id,
                #'origin': origin, TODO (check whether it can be derived; for now only claims exist)
'conformed_id': conformed_id,
# Check to do:
'do_visual': do_visual,
'do_analysis': do_analysis,
'do_glazing': do_glazing,
'do_taste': do_taste,
# Text info:
'visual': visual,
'analysis': analysis,
'taste': taste,
'weight_glazing': weight_glazing,
'perc_glazing_indicated': perc_glazing_indicated,
'weight_drained': weight_drained,
'perc_glazing_calculated': perc_glazing_calculated,
'note': note,
'sampling_plan_id': sampling_plan_id,
'cancel': cancel,
'access_id': access_id,
}
if closed:
data['visual_state'] = 'passed' if visual_state else 'not_passed'
data['analysis_state'] = 'passed' if analysis_state else 'not_passed'
data['taste_state'] = 'passed' if taste_state else 'not_passed'
data['glazing_state'] = 'passed' if glazing_state else 'not_passed'
else:
data['visual_state'] = 'passed' if visual_state else 'to_examined'
data['analysis_state'] = 'passed' if analysis_state else 'to_examined'
data['taste_state'] = 'passed' if taste_state else 'to_examined'
data['glazing_state'] = 'passed' if glazing_state else 'to_examined'
if item: # already exist
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
ref)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, ref)
quality_sampling[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else: # new
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, ref)
quality_sampling[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
if conformed_id:
sock.execute(dbname, uid, pwd, 'quality.conformed', 'write', conformed_id, {
'parent_sampling_id' : quality_sampling[access_id],
'origin': 'sampling',
})
            # Add the tasters:
for taste_id, taster in tasters.iteritems():
if taster:
taster_access_id = "%s%s" % (access_id, taste_id)
data = {
'name': taster,
'sample_id': quality_sampling[access_id] ,
'access_id': taster_access_id,
}
taster_ids = sock.execute(dbname, uid, pwd, 'quality.sampling.taster', 'search', [
('access_id', '=', taster_access_id)])
if taster_ids:
taster_ids = sock.execute(dbname, uid, pwd,
'quality.sampling.taster' , 'write', taster_ids[0], data)
else:
taster_ids = sock.execute(dbname, uid, pwd,
'quality.sampling.taster', 'create', data)
if closed: # test for WF (end of importation)
if passed:
sample_passed.append(quality_sampling[access_id])
else:
sample_notpassed.append(quality_sampling[access_id])
else:
if passed:
sample_passed.append(quality_sampling[access_id])
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_sampling = store.load()
else:
store.store(quality_sampling)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# ACTION
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Azioni.txt')
openerp_object = 'quality.action'
log_event("Start import %s" % openerp_object)
#quality_action = {} # loaded in the preload phase (not reset here, otherwise the child references would be lost)
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
if len(line) != max_col:
log_event("[ERROR] %s ] counter['tot'], ] not %s but now %s! Jump:" % (
counter['tot'], max_col, len(line)))
continue
counter['tot'] += 1
access_id = line[0]
ref = format_string(line[1])
date = format_date(line[2])
            origin = format_string(line[3]) # TODO to be derived at the end
note = format_string(line[4])
proposed_subject = format_string(line[5])
esit_date = format_date(line[6])
esit_note = format_string(line[7])
child_code = format_string(line[9])
#closed 10
closed_date = format_date(line[11])
proposing_entity = format_string(line[13])
action_type = format_string(line[16])
ref = "ACP%05d" % (int(ref or '0'))
if action_type == "Azione Preventiva":
action_type_id = 'preventive'
elif action_type == "Intervento di Miglioramento":
action_type_id = 'enhance'
else: # action_type == "Azione Correttiva" or ""
action_type_id = 'corrective' # default
child_id = quality_action.get(child_code, False)
origin = origin_action.get(origin, False)
# test if record exists (basing on Ref. as code of Partner)
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'ref': ref,
'date': date,
'origin': origin,
'note': note,
'proposed_subject': proposed_subject,
'proposing_entity': proposing_entity,
'esit_date': esit_date,
'closed_date': closed_date,
'esit_note': esit_note,
'child_id': child_id,
'type': action_type_id,
'access_id': access_id,
}
if item: # already exist
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
ref)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, ref)
quality_action[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else: # new
counter['new'] += 1
try:
openerp_id=sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
                        openerp_object, ref)
quality_action[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
if child_id:
sock.execute(dbname, uid, pwd, 'quality.action', 'write',
child_id, {
'parent_id' : quality_action[access_id],
                    'origin': data['origin'], # TODO not important
})
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_action = store.load()
else:
store.store(quality_action)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# ACTION INTERVENT
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = True
file_input = os.path.expanduser('~/ETL/generalfood/Interventi.txt')
openerp_object = 'quality.action.intervent'
log_event("Start import %s" % openerp_object)
quality_action_intervent = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped:" % (
counter['tot'], max_col, len(line)))
continue
access_id = line[0]
action_code = format_string(line[1])
name = format_string(line[2])
manager_code = format_string(line[3])
deadline = format_date(line[4])
action_id = quality_action.get(action_code, False)
manager_id = 1
# Start of importation:
counter['tot'] += 1
# test if record exists (basing on Ref. as code of Partner)
item = sock.execute(dbname, uid, pwd, openerp_object , 'search', [
('access_id', '=', access_id)])
data = {
'name': name,
'manager_id': manager_id,
'deadline': deadline,
'action_id': action_id,
'access_id': access_id,
}
if item: # already exist
counter['upd'] += 1
try:
if only_create:
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, " (jumped only_create clause: ",
access_id)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, access_id)
quality_action_intervent[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else: # new
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, access_id)
quality_action_intervent[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_action_intervent = store.load()
else:
store.store(quality_action_intervent)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# ACCEPTATION
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = False
file_input = os.path.expanduser('~/ETL/generalfood/Accettazioni.txt')
openerp_object = 'quality.acceptation'
log_event("Start import %s" % openerp_object)
quality_acceptation = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
if len(line) != max_col:
log_event("[ERROR] %s Different cols not %s but now %s! Jumped:" % (
counter['tot'], max_col, len(line)))
continue
counter['tot'] += 1
access_id = line[0]
name = format_string(line[1])
date = format_date(line[2])
partner_code = format_string(line[3])
origin = format_string(line[5])
note = format_string(line[6])
cancel = format_boolean(line[11])
            if not date:
                # Fall back to the date of the previously processed record (or the default error date)
                date = data.get('date', default_error_data)
ref = "ACPT%05d" % (int(name or '0'))
if partner_code:
partner_id = get_or_create_partner(partner_code, 'supplier', False,
res_partner_customer, res_partner_supplier)
else:
partner_id = False
if not partner_id:
log_event("[WARN] Partner not found in %s" % (ref))
            # Check whether the record already exists (search by access_id)
            item = sock.execute(dbname, uid, pwd, openerp_object, 'search', [
                ('access_id', '=', access_id)])
data = {
'ref': ref,
'date': date,
'origin': origin,
'partner_id': partner_id,
'note': note,
'cancel': cancel,
'access_id': access_id,
}
if item: # already exist
counter['upd'] += 1
try:
if only_create:
                        log_event(
                            "[INFO]", counter['tot'], "Write",
                            openerp_object, " (skipped: only_create is set) ",
                            name)
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write",
openerp_object, name)
quality_acceptation[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else: # new
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create",
openerp_object, name)
quality_acceptation[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_acceptation = store.load()
else:
store.store(quality_acceptation)
log_event("Total %(tot)s (N: %(new)s, U: %(upd)s)" % counter)
# -----------------------------------------------------------------------------
# ACCEPTATION DETAILS
# -----------------------------------------------------------------------------
only_create = False
jump_because_imported = False
file_input = os.path.expanduser('~/ETL/generalfood/Dettagli.txt')
openerp_object = 'quality.acceptation.line'
log_event("Start import %s" % openerp_object)
quality_acceptation_line = {}
lines = csv.reader(open(file_input, 'rb'), delimiter=separator)
counter = {'tot': -1, 'new': 0, 'upd': 0}
max_col = 0
try:
for line in lines:
if jump_because_imported:
break
if counter['tot'] < 0:
counter['tot'] += 1
max_col = len(line)
continue
if len(line):
if len(line) != max_col:
log_event("[ERROR] Different col not %s but now %s! Jumped:" % (
max_col, len(line)))
continue
counter['tot'] += 1
# Read line
access_id = line[0]
acceptation_code = format_string(line[1])
lot_code = format_string(line[2])
conformed_code = format_string(line[3])
qty_arrived = format_currency(line[4])
qty_expected = format_currency(line[5])
            temp = format_boolean(line[6]) # Motivo (reason)
            label = format_boolean(line[7]) # Etichetta (label)
            package = format_boolean(line[8]) # Stato (condition)
            #visual = format_boolean(line[9]) # Visivo (visual check)
            expired = format_boolean(line[10]) # Scadenza (expiry)
            motivation = format_string(line[11])
            qty = format_boolean(line[12]) # Quantitativo (quantity)
            quality = False # TODO: does this field exist in the import file?
lot_id = False
if not lot_code or lot_code == '0':
log_event("[ERROR] Lot empty, jumped:", acceptation_code)
continue
lot_id = stock_production_lot.get(lot_code, False)
if not lot_id:
log_event("[ERROR] Lot not found, temp created:", lot_code)
                # Create a temporary lot (it will be updated later by the MySQL sync)
lot_id = sock.execute(dbname, uid, pwd, 'stock.production.lot',
'create', {
'name': lot_code,
'product_id': default_product_id,
'date': datetime.now().strftime("%Y-%m-%d"),
'default_supplier_id': False
})
            # Check whether the record already exists (search by access_id)
            item = sock.execute(dbname, uid, pwd, openerp_object, 'search', [
                ('access_id', '=', access_id)])
if conformed_code and conformed_code != '0':
                conformed_id = quality_conformed.get(conformed_code, False)
if not conformed_id:
conformed_ids = sock.execute(dbname, uid, pwd,
'quality.conformed', 'search', [
('access_id', '=', conformed_code)])
if conformed_ids:
conformed_id = conformed_ids[0]
else:
log_event("[WARNING] Conformed_id not found, not write: %s" % counter['tot'])
else:
conformed_id = False #quality_conformed.get(conformed_code, False)
acceptation_id = quality_acceptation.get(acceptation_code, False)
if not acceptation_id:
log_event("[ERROR] %s. No parent form: %s" % (
counter['tot'], acceptation_code))
continue
data = {
'acceptation_id': acceptation_id,
'lot_id': lot_id,
'qty_arrived': qty_arrived,
'qty_expected': qty_expected,
# Motivi check:
'qty': qty,
'temp': temp,
'label': label,
'package': package,
'expired': expired,
#'qty_package': qty_package,
'conformed_id': conformed_id,
'motivation': motivation,
'access_id': access_id,
}
if item: # already exist
counter['upd'] += 1
try:
if only_create:
                        log_event(
                            "[INFO]", counter['tot'], "Write",
                            openerp_object, " (skipped: only_create is set)")
else:
item_mod = sock.execute(
dbname, uid, pwd, openerp_object, 'write',
item, data)
log_event(
"[INFO]", counter['tot'], "Write", openerp_object)
quality_acceptation_line[access_id] = item[0]
except:
log_event("[ERROR] Modifing data, current record:", data)
else: # new
counter['new'] += 1
try:
openerp_id = sock.execute(
dbname, uid, pwd, openerp_object, 'create', data)
log_event(
"[INFO]", counter['tot'], "Create", openerp_object)
quality_acceptation_line[access_id] = openerp_id
except:
log_event(
"[ERROR] Error creating data, current record: ", data)
            # Update the conformed record so it links back to the acceptance form
if conformed_id:
sock.execute(dbname, uid, pwd, 'quality.conformed', 'write',
conformed_id, {
                        'acceptation_id': acceptation_id,  # parent of this line
'origin': 'acceptation',
})
except:
log_event('[ERROR] Error importing data!')
raise
store = status(openerp_object)
if jump_because_imported:
quality_acceptation_line = store.load()
else:
store.store(quality_acceptation_line)
# -----------------------------------------------------------------------------
# Trigger events:
# -----------------------------------------------------------------------------
# ------------
# ACCEPTATION:
# ------------
# TODO
# -------
# CLAIMS:
# -------
# Claim (bozza > opened)
openerp_object = 'quality.claim'
domain = [('state','=','draft')]
field_list = ('id',)
log_event('Start trigger WF Claim (bozza > open)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list):
try:
item_id = item['id']
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_claim_draft_opened', item_id)
log_event('[INFO] bozza > opened, ID: %s' % item_id)
except:
log_event('[ERROR] Impossibile bozza > opened, ID: %s' % item_id)
log_event('End trigger WF Claim (bozza > open) record %s' % len(item_ids))
# Claim (opened > nc > done > close > saw )
domain = [('state', '=', 'opened'), ('need_accredit', '=', True)]
field_list = ('id')
log_event('Start trigger WF Claim (opened > nc > done > close > saw)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list):
try:
item_id = item['id']
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_claim_opened_nc', item_id)
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_claim_nc_done', item_id)
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_claim_done_closed', item_id)
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_claim_closed_saw', item_id)
log_event('[INFO] opened > nc > done > close > saw, ID: %s' % item_id)
except:
log_event('[ERROR] Impossibile opened > nc > done > close > saw, ID: %s' % item_id)
log_event('End trigger WF Claim (opened > nc > done > close > saw) record %s' % len(item_ids))
# Claim (opened > closed > saw)
domain = [('state', '=', 'opened')]
field_list = ('id')
log_event('Start trigger WF Claim (opened > closed > saw)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list):
try:
item_id = item['id']
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_claim_opened_closed', item_id)
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_claim_closed_saw', item_id)
log_event('[INFO] opened > closed > saw, ID: %s' % item_id)
except:
log_event('[ERROR] Impossibile opened > closed > saw, ID: %s' % item_id)
log_event('End trigger WF Claim (opened > closed > saw) record %s' % len(item_ids))
# -------
# Action:
# -------
# Action (draft > opened)
openerp_object = 'quality.action'
domain = [('state','=','draft')]
field_list = ('id',)
log_event('Start trigger WF Action (draft > opened)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list):
try:
item_id = item['id']
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_action_draft_opened', item_id)
log_event('[INFO] bozza > opened, ID: %s' % item_id)
except:
log_event('[ERROR] Impossibile bozza > opened, ID: %s' % item_id)
log_event('End trigger WF Claim (bozza > opened) record %s' % len(item_ids))
# Action (opened > closed > saw) > only those with a closing date
domain = [('state','=','opened'),('closed_date','!=',False)]
field_list = ('id',)
log_event('Start trigger WF Action (opened > closed > saw)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list):
try:
item_id = item['id']
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_action_opened_closed', item_id)
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_action_closed_saw', item_id)
log_event('[INFO] opened > closed > saw, ID: %s' % item_id)
except:
log_event('[ERROR] Impossibile opened > closed > saw, ID: %s' % item_id)
log_event('End trigger WF Claim (opened > closed > saw) record %s' % len(item_ids))
# ----------
# Conformed:
# ----------
# Conformed (draft > opened > closed > saw) >> not cancelled
openerp_object = 'quality.conformed'
domain = [('state','=','draft'), ('cancel', '=', False)]
field_list = ('id', )
log_event('Start trigger WF Conformed (draft > opened > closed > saw)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list):
try:
item_id = item['id']
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_conformed_draft_opened', item_id)
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_conformed_opened_closed', item_id)
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_conformed_closed_saw', item_id)
log_event('[INFO] draft > opened > closed > saw, ID: %s' % item_id)
except:
log_event('[ERROR] Impossibile draft > opened > closed > saw, ID: %s' % item_id)
log_event('End trigger WF Claim (draft > opened > closed > saw) record %s' % len(item_ids))
domain = [('state','=','draft'), ('cancel', '=', True)]
field_list = ('id', )
log_event('Start trigger WF Conformed (draft > opened > cancel)')
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list):
try:
item_id = item['id']
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_conformed_draft_opened', item_id)
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_conformed_opened_cancel', item_id)
log_event('[INFO] draft > opened > closed > saw, ID: %s' % item_id)
except:
log_event('[ERROR] Impossibile draft > opened > closed > saw, ID: %s' % item_id)
log_event('End trigger WF Claim (draft > opened > closed > saw) record %s' % len(item_ids))
# ---------
# Sampling:
# ---------
openerp_object = 'quality.sampling'
comment = "Sampling (draft > opened > passed) >> passati"
log_event('Start trigger WF %s' % comment)
for item_id in sample_passed:
try:
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_sampling_draft_opened', item_id)
except:
log_event('[WARNING] Impossibile %s, ID: %s' % (comment, item_id))
try:
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_sampling_opened_passed', item_id)
log_event('[INFO] %s, ID: %s' % (comment, item_id))
except:
log_event('[ERROR] Impossibile %s, ID: %s' % (comment, item_id))
log_event('End trigger WF %s record %s' % (comment, len(item_ids)))
comment = "Sampling (draft > opened > notpassed) >> not passati"
log_event('Start trigger WF %s' % comment)
for item_id in sample_notpassed:
try:
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_sampling_draft_opened', item_id)
except:
log_event('[WARNING] Impossibile aprire il campionamento %s, ID: %s' % (comment, item_id))
try:
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_sampling_opened_notpassed', item_id)
log_event('[INFO] %s, ID: %s' % (comment, item_id))
except:
log_event('[ERROR] Impossibile mettere non passato %s, ID: %s' % (comment, item_id))
log_event('End trigger WF %s record %s' % (comment, len(item_ids)))
comment = "Sampling (draft > opened) >> aperti"
domain = [('state','=','draft')]
field_list = ('id', )
log_event('Start trigger WF %s' % comment)
item_ids = sock.execute(dbname, uid, pwd, openerp_object, 'search', domain)
for item in sock.execute(dbname, uid, pwd, openerp_object, 'read', item_ids, field_list):
try:
item_id = item['id']
sock.exec_workflow(dbname, uid, pwd, openerp_object,
'trigger_sampling_draft_opened', item_id)
log_event('[INFO] %s, ID: %s' % (comment, item_id))
except:
log_event('[ERROR] Impossibile %s, ID: %s' % (comment, item_id))
log_event('End trigger WF %s record %s' % (comment, len(item_ids)))
log_event("PROMEMORIA: Aggiornare i contatori nel programma, valori prossimi: ") # TODO mettere il counter total
log_event("End of importation")
| Micronaet/micronaet-quality | quality/etl/import.py | Python | agpl-3.0 | 101,183 |
"""
benson.py
Main program to run Benson on boot. To set up automatic start on boot, run
'crontab -e' in a terminal and add this line to the end of the crontab file:
@reboot python3 /home/pi/Benson/benson.py &
To 'map' a module to the LED grid, add an entry to the module_map dictionary:
the module name as a string value keyed by an (x, y) coordinate tuple. The
module must have a public method with the same name as the module.
REQUIRED PYTHON3 PACKAGES:
gtts - google text to speech api
REQUIRED 3RD PARTY APIs:
forecast.io
PUBLIC METHODS:
loop() - main loop
    run(coordinates) - run a module mapped to the given coordinates
self_update() - pulls newest code from github repo
exit() - exit the program
reboot() - reboot the pi
shutdown() - shutdown the pi
speak_date() - speak today's date
speak_time() - speak the current time
mount() - mount all external drives attached to the pi
unmount() - unmount all drives currently attached to the pi
ambient() - speak ambient temp and humidity from sense hat sensors
MAPPED MODULES:
clock.py - show a digital clock on the led grid
tic_tac_toe.py - a tic tac toe game
eight_queens.py - an n-queen problem solver
    forecast.py - get weather forecast with the forecast.io API
roll_dice.py - roll the dice
USAGE:
create a Benson object, then call the loop() method.
"""
__all__ = []
import os
import sys
import time
import datetime
import subprocess
from sense_hat import SenseHat, ACTION_RELEASED
from _exceptions import *
from _helper import QUADRANTS, set_quadrants, speak
IDS = ('sda1', 'sdb1', 'sdc1', 'sdd1')
# Map module names to the LED grid using (x, y) coordinate tuples as keys.
# Each module must have a public method whose name matches the module name.
module_map = {
# row 1 of LED grid
(7, 0): 'speak_date', (7, 1): 'speak_time', (7, 2): 'ambient', (7, 3): 'forecast',
(7, 4): 'clock', (7, 5): '', (7, 6): '', (7, 7): '',
# row 2 of LED grid
(6, 0): '', (6, 1): '', (6, 2): '', (6, 3): '',
(6, 4): '', (6, 5): '', (6, 6): '', (6, 7): '',
# row 3 of LED grid
(5, 0): '', (5, 1): '', (5, 2): '', (5, 3): '',
(5, 4): '', (5, 5): '', (5, 6): '', (5, 7): '',
# row 4 of LED grid
(4, 0): '', (4, 1): '', (4, 2): '', (4, 3): '',
(4, 4): '', (4, 5): '', (4, 6): '', (4, 7): '',
# row 5 of LED grid
(3, 0): '', (3, 1): '', (3, 2): '', (3, 3): '',
(3, 4): '', (3, 5): '', (3, 6): '', (3, 7): '',
# row 6 of LED grid
(2, 0): 'tic_tac_toe', (2, 1): 'roll_dice', (2, 2): '', (2, 3): '',
(2, 4): '', (2, 5): '', (2, 6): '', (2, 7): '',
# row 7 of LED grid
(1, 0): 'eight_queens', (1, 1): '', (1, 2): '', (1, 3): '',
(1, 4): '', (1, 5): '', (1, 6): '', (1, 7): '',
# row 8 of LED grid
(0, 0): 'mount', (0, 1): 'unmount', (0, 2): '', (0, 3): '',
(0, 4): 'self_update', (0, 5): 'exit', (0, 6): 'reboot', (0, 7): 'shutdown'
}
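# Example (hypothetical): to map a new module named 'stopwatch.py' to the unused
# top-right pixel, add (7, 7): 'stopwatch' to the dictionary above; stopwatch.py
# would then need to expose a public stopwatch() function for run() to call.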
class Benson:
"""Benson the helper."""
def __init__(self):
"""Initialize Benson."""
self.repo = 'Benson'
self.sense = SenseHat()
self.sense.low_light = True
self.x = 0
self.y = 0
self.datetime_ = None
self.mounted = 0
self.unmounted = 0
self.ambient_temperature = None
self.ambient_humidity = None
def loop(self):
"""Main program loop."""
speak('greeting')
self.sense.show_message('(:', text_colour=(200, 0, 200))
self.sense.set_pixel(self.x, self.y, (200, 0, 200))
while True:
directions = {
'up': ('y', self.y - 1),
'down': ('y', self.y + 1),
'left': ('x', self.x - 1),
'right': ('x', self.x + 1)
}
event = self.sense.stick.wait_for_event(emptybuffer=True)
if event.action != ACTION_RELEASED:
if event.direction == 'middle':
self.sense.rotation = 90
self.run((self.x, self.y)) # run the program mapped to x, y
self.sense.rotation = 0
else:
coordinate, change = directions.get(event.direction)
if change == -1:
change = 7
elif change == 8:
change = 0
if coordinate == 'x':
self.x = change
elif coordinate == 'y':
self.y = change
self.sense.clear()
self.sense.set_pixel(self.x, self.y, (200, 0, 200))
def run(self, coordinates):
"""Run a mapped method or module."""
program = module_map[coordinates]
try:
method = getattr(self, program)
except AttributeError:
pass
else:
method()
return None
try:
mod = __import__(program)
method = getattr(mod, program)
except ValueError:
self.sense.rotation = 90
self.sense.show_letter('X', text_colour=(200, 0, 0))
time.sleep(0.5)
except AttributeError:
self.sense.rotation = 90
self.sense.show_letter('?', text_colour=(200, 0, 0))
time.sleep(0.5)
except Exception as e:
raise BensonPanicError('Unable to load module: ' + program) from e
else:
method()
return None
def self_update(self):
"""Clone github repo for newest code."""
repo_url = 'https://github.com/wenbin5/' + self.repo + '.git'
speak('self_update1')
try:
os.system('sudo rm -R ' + self.repo)
os.system('sudo git clone ' + repo_url)
os.system('sudo chown -R pi:pi ' + self.repo)
except Exception as e:
raise BensonPanicError('Unable to self update.') from e
else:
speak('self_update2')
self.reboot()
def exit(self):
"""Exit the program."""
speak('goodbye')
self.sense.clear()
sys.exit()
def reboot(self):
"""Reboot the Raspberry pi."""
self.sense.show_message('>>', text_colour=(0, 200, 0), scroll_speed=0.2)
self.sense.clear()
speak('reboot')
os.system('sudo reboot now')
def shutdown(self):
"""Shutdown the Raspberry pi."""
self.sense.show_message('<<', text_colour=(200, 0, 0), scroll_speed=0.2)
self.sense.clear()
speak('shutdown')
os.system('sudo shutdown -h now')
def speak_date(self):
"""Speak the current date."""
self.datetime_ = datetime.datetime.now()
date = self.datetime_.ctime().split(':')[0][:-3]
message = 'today is ' + date
speak('date', message)
month, day = str(self.datetime_.month), str(self.datetime_.day)
set_quadrants(QUADRANTS[:2], month, (0, 200, 0))
set_quadrants(QUADRANTS[2:], day, (0, 200, 200))
time.sleep(3)
def speak_time(self):
"""Speak the current time."""
self.datetime_ = datetime.datetime.now()
hour, minute = str(self.datetime_.hour), str(self.datetime_.minute)
message = 'It is ' + hour + ' ' + minute
speak('time', message)
set_quadrants(QUADRANTS[:2], hour, (0, 200, 0))
set_quadrants(QUADRANTS[2:], minute, (0, 200, 200))
time.sleep(3)
def mount(self):
"""Mount up to 4 external thumb drives."""
for d in IDS:
try:
command = 'sudo mount /dev/' + d + ' /media/' + d + ' -o uid=pi,gid=pi'
if not subprocess.getoutput(command):
self.mounted += 1
except Exception as e:
raise BensonPanicError('Unable to mount drives.') from e
message = 'Mounted ' + str(self.mounted) + ' drives.'
speak('mount_ex', message)
set_quadrants(QUADRANTS[:self.mounted], 'abcd', (0, 200, 0))
time.sleep(3)
def unmount(self):
"""Unmount all mounted thumb drives."""
for d in IDS:
try:
command = 'sudo umount /dev/' + d
if not subprocess.getoutput(command):
self.unmounted += 1
except Exception as e:
raise BensonPanicError('Unable to unmount drives.') from e
self.mounted = 0
message = 'Unmounted ' + str(self.unmounted) + ' drives.'
speak('umount_ex', message)
set_quadrants(QUADRANTS[:self.unmounted], 'abcd', (200, 0, 0))
time.sleep(3)
def ambient(self):
"""Speak the ambient temperature and the humidity."""
t1 = self.sense.get_temperature_from_pressure()
t2 = self.sense.get_temperature_from_humidity()
temp_avg = (t1+t2) / 2
        self.ambient_temperature = str(int(temp_avg*1.8 + 32))  # convert Celsius to Fahrenheit
self.ambient_humidity = str(int(self.sense.humidity))
message = 'Ambient temp is ' + self.ambient_temperature
message += ' degrees with humidity at ' + self.ambient_humidity + ' %.'
speak('ambient', message)
set_quadrants(QUADRANTS[:2], self.ambient_temperature, (200, 0, 0))
set_quadrants(QUADRANTS[2:], self.ambient_humidity, (0, 200, 200))
time.sleep(3)
if __name__ == '__main__':
benson = Benson()
benson.loop()
| wenbin5/Benson | benson.py | Python | mit | 9,393 |
# Developed for module tiericide, this script will quickly print out a market
# conversion map based on patch notes, as well as database conversion mapping.
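# Example invocation (paths are illustrative):
#   python conversion.py -o ~/old_eve_export.db -n ~/new_eve_export.db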
import argparse
import os.path
import sqlite3
import sys
# Add eos root path to sys.path so we can import ourselves
path = os.path.dirname(__file__)  # __file__ is already a str in Python 3
sys.path.append(os.path.realpath(os.path.join(path, "..")))
# change to correct conversion
rename_phrase = " is now known as "
conversion_phrase = " is being converted to "
text = """Partial Weapon Navigation is being converted to Phased Scoped Target Painter
Indirect Scanning Dampening Unit I is being converted to Phased Muon Scoped Sensor Dampener
'Broker' Remote Sensor Dampener I is being converted to 'Executive' Remote Sensor Dampener
Initiated Ion Field ECM I is being converted to Hypnos Scoped Magnetometric ECM
FZ-3 Subversive Spatial Destabilizer ECM is being converted to BZ-5 Scoped Gravimetric ECM
'Penumbra' White Noise ECM is being converted to Umbra Scoped Radar ECM
Faint Phase Inversion ECM I is being converted to Enfeebling Scoped Ladar ECM
'Hypnos' Multispectral ECM I is being converted to Compulsive Scoped Multispectral ECM
1Z-3 Subversive ECM Eruption is being converted to Cetus Scoped Burst Jammer
'Prayer' Remote Tracking Computer is being converted to P-S Compact Remote Tracking Computer
'Tycoon' Remote Tracking Computer is being converted to 'Enterprise' Remote Tracking Computer
Monopulse Tracking Mechanism I is being converted to F-12 Enduring Tracking Computer
'Orion' Tracking CPU I is being converted to Optical Compact Tracking Computer
'Economist' Tracking Computer I is being converted to 'Marketeer' Tracking Computer
Beta-Nought Tracking Mode is being converted to 'Basic' Tracking Enhancer
Azimuth Descalloping Tracking Enhancer is being converted to 'Basic' Tracking Enhancer
F-AQ Delay-Line Scan Tracking Subroutines is being converted to 'Basic' Tracking Enhancer
Beam Parallax Tracking Program is being converted to 'Basic' Tracking Enhancer
Sigma-Nought Tracking Mode I is being converted to Fourier Compact Tracking Enhancer
Auto-Gain Control Tracking Enhancer I is being converted to Fourier Compact Tracking Enhancer
F-aQ Phase Code Tracking Subroutines is being converted to Fourier Compact Tracking Enhancer
Lateral Gyrostabilizer is being converted to 'Basic' Gyrostabilizer
F-M2 Weapon Inertial Suspensor is being converted to 'Basic' Gyrostabilizer
Hydraulic Stabilization Actuator is being converted to 'Basic' Gyrostabilizer
Stabilized Weapon Mounts is being converted to 'Basic' Gyrostabilizer
Cross-Lateral Gyrostabilizer I is being converted to Counterbalanced Compact Gyrostabilizer
F-M3 Munition Inertial Suspensor is being converted to Counterbalanced Compact Gyrostabilizer
Pneumatic Stabilization Actuator I is being converted to Counterbalanced Compact Gyrostabilizer
Monophonic Stabilization Actuator I is being converted to 'Kindred' Gyrostabilizer
Monophonic Stabilization Actuator I Blueprint is being converted to 'Kindred' Gyrostabilizer Blueprint
Heat Exhaust System is being converted to 'Basic' Heat Sink
C3S Convection Thermal Radiator is being converted to 'Basic' Heat Sink
'Boreas' Coolant System is being converted to 'Basic' Heat Sink
Stamped Heat Sink is being converted to 'Basic' Heat Sink
Thermal Exhaust System I is being converted to Extruded Compact Heat Sink
C4S Coiled Circuit Thermal Radiator is being converted to Extruded Compact Heat Sink
'Skadi' Coolant System I is being converted to Extruded Compact Heat Sink
'Mangonel' Heat Sink I is being converted to 'Trebuchet' Heat Sink I
'Mangonel' Heat Sink I Blueprint is being converted to 'Trebuchet' Heat Sink Blueprint
Insulated Stabilizer Array is being converted to 'Basic' Magnetic Field Stabilizer
Linear Flux Stabilizer is being converted to 'Basic' Magnetic Field Stabilizer
Gauss Field Balancer is being converted to 'Basic' Magnetic Field Stabilizer
Magnetic Vortex Stabilizer is being converted to 'Basic' Magnetic Field Stabilizer
Insulated Stabilizer Array I is being converted to Vortex Compact Magnetic Field Stabilizer
Linear Flux Stabilizer I is being converted to Vortex Compact Magnetic Field Stabilizer
Gauss Field Balancer I is being converted to Vortex Compact Magnetic Field Stabilizer
'Capitalist' Magnetic Field Stabilizer I is being converted to 'Monopoly' Magnetic Field Stabilizer
'Capitalist' Magnetic Field Stabilizer I Blueprint is being converted to 'Monopoly' Magnetic Field Stabilizer Blueprint
Muon Coil Bolt Array I is being converted to Crosslink Compact Ballistic Control System
Multiphasic Bolt Array I is being converted to Crosslink Compact Ballistic Control System
'Pandemonium' Ballistic Enhancement is being converted to Crosslink Compact Ballistic Control System
Ballistic 'Purge' Targeting System I is being converted to 'Full Duplex' Ballistic Control System
Ballistic 'Purge' Targeting System I Blueprint is being converted to 'Full Duplex' Ballistic Control System Blueprint
'Langour' Drive Disruptor I is being converted to X5 Enduring Stasis Webifier
Patterned Stasis Web I is being converted to Fleeting Compact Stasis Webifier
Fleeting Progressive Warp Scrambler I is being converted to Faint Epsilon Scoped Warp Scrambler
Fleeting Warp Disruptor I is being converted to Faint Scoped Warp Disruptor
GLFF Containment Field is being converted to 'Basic' Damage Control
Interior Force Field Array is being converted to 'Basic' Damage Control
F84 Local Damage System is being converted to 'Basic' Damage Control
Systematic Damage Control is being converted to 'Basic' Damage Control
'Gonzo' Damage Control I is being converted to 'Radical' Damage Control
'Gonzo' Damage Control I Blueprint is being converted to 'Radical' Damage Control Blueprint
Emergency Damage Control I is being converted to IFFA Compact Damage Control
F85 Peripheral Damage System I is being converted to IFFA Compact Damage Control
Pseudoelectron Containment Field I is being converted to IFFA Compact Damage Control
Micro Ld-Acid Capacitor Battery I is being converted to 'Micro' Cap Battery
Micro Ohm Capacitor Reserve I is being converted to 'Micro' Cap Battery
Micro F-4a Ld-Sulfate Capacitor Charge Unit is being converted to 'Micro' Cap Battery
Micro Peroxide Capacitor Power Cell is being converted to 'Micro' Cap Battery
Micro Capacitor Battery II is being converted to 'Micro' Cap Battery
Small Ohm Capacitor Reserve I is being converted to Small Compact Pb-Acid Cap Battery
Small F-4a Ld-Sulfate Capacitor Charge Unit is being converted to Small Compact Pb-Acid Cap Battery
Small Peroxide Capacitor Power Cell is being converted to Small Compact Pb-Acid Cap Battery
Medium Ohm Capacitor Reserve I is being converted to Medium Compact Pb-Acid Cap Battery
Medium F-4a Ld-Sulfate Capacitor Charge Unit is being converted to Medium Compact Pb-Acid Cap Battery
Medium Peroxide Capacitor Power Cell is being converted to Medium Compact Pb-Acid Cap Battery
Large Ohm Capacitor Reserve I is being converted to Large Compact Pb-Acid Cap Battery
Large F-4a Ld-Sulfate Capacitor Charge Unit is being converted to Large Compact Pb-Acid Cap Battery
Large Peroxide Capacitor Power Cell is being converted to Large Compact Pb-Acid Cap Battery
ECCM - Radar I is being converted to Sensor Booster I
ECCM - Ladar I is being converted to Sensor Booster I
ECCM - Magnetometric I is being converted to Sensor Booster I
ECCM - Gravimetric I is being converted to Sensor Booster I
ECCM - Omni I is being converted to Sensor Booster I
ECCM - Radar I Blueprint is being converted to Sensor Booster I Blueprint
ECCM - Ladar I Blueprint is being converted to Sensor Booster I Blueprint
ECCM - Magnetometric I Blueprint is being converted to Sensor Booster I Blueprint
ECCM - Gravimetric I Blueprint is being converted to Sensor Booster I Blueprint
ECCM - Omni I Blueprint is being converted to Sensor Booster I Blueprint
Alumel Radar ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Alumel Ladar ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Alumel Gravimetric ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Alumel Omni ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Alumel Magnetometric ECCM Sensor Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Ladar ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Gravimetric ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Omni ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Radar ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Magnetometric ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Extra Radar ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Extra Ladar ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Extra Gravimetric ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Extra Magnetometric ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Gravimetric Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Radar Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Omni Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Ladar Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Magnetometric Positional ECCM Sensor System I is being converted to F-90 Compact Sensor Booster
Incremental Radar ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Incremental Ladar ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Incremental Gravimetric ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Incremental Magnetometric ECCM Scanning Array I is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Radar Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Ladar Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Gravimetric Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Omni Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Prototype ECCM Magnetometric Sensor Cluster is being converted to Alumel-Wired Enduring Sensor Booster
Conjunctive Radar ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Conjunctive Ladar ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Conjunctive Gravimetric ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
Conjunctive Magnetometric ECCM Scanning Array I is being converted to F-90 Compact Sensor Booster
ECCM - Omni II is being converted to Sensor Booster II
ECCM - Gravimetric II is being converted to Sensor Booster II
ECCM - Ladar II is being converted to Sensor Booster II
ECCM - Magnetometric II is being converted to Sensor Booster II
ECCM - Radar II is being converted to Sensor Booster II
ECCM - Omni II Blueprint is being converted to Sensor Booster II Blueprint
ECCM - Gravimetric II Blueprint is being converted to Sensor Booster II Blueprint
ECCM - Ladar II Blueprint is being converted to Sensor Booster II Blueprint
ECCM - Magnetometric II Blueprint is being converted to Sensor Booster II Blueprint
ECCM - Radar II Blueprint is being converted to Sensor Booster II Blueprint
'Forger' ECCM - Magnetometric I is being converted to 'Shady' Sensor Booster
'Forger' ECCM - Magnetometric I Blueprint is being converted to 'Shady' Sensor Booster Blueprint
Basic RADAR Backup Array is being converted to 'Basic' Signal Amplifier
Basic Ladar Backup Array is being converted to 'Basic' Signal Amplifier
Basic Gravimetric Backup Array is being converted to 'Basic' Signal Amplifier
Basic Magnetometric Backup Array is being converted to 'Basic' Signal Amplifier
Basic Multi Sensor Backup Array is being converted to 'Basic' Signal Amplifier
Emergency Magnetometric Scanners is being converted to 'Basic' Signal Amplifier
Emergency Multi-Frequency Scanners is being converted to 'Basic' Signal Amplifier
Emergency RADAR Scanners is being converted to 'Basic' Signal Amplifier
Emergency Ladar Scanners is being converted to 'Basic' Signal Amplifier
Emergency Gravimetric Scanners is being converted to 'Basic' Signal Amplifier
Sealed RADAR Backup Cluster is being converted to 'Basic' Signal Amplifier
Sealed Magnetometric Backup Cluster is being converted to 'Basic' Signal Amplifier
Sealed Multi-Frequency Backup Cluster is being converted to 'Basic' Signal Amplifier
Sealed Ladar Backup Cluster is being converted to 'Basic' Signal Amplifier
Sealed Gravimetric Backup Cluster is being converted to 'Basic' Signal Amplifier
Surplus RADAR Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative RADAR Backup Sensors is being converted to 'Basic' Signal Amplifier
Surplus Magnetometric Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative Magnetometric Backup Sensors is being converted to 'Basic' Signal Amplifier
Surplus Multi-Frequency Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative Multi-Frequency Backup Sensors is being converted to 'Basic' Signal Amplifier
Surplus Ladar Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative Ladar Backup Sensors is being converted to 'Basic' Signal Amplifier
Surplus Gravimetric Reserve Array is being converted to 'Basic' Signal Amplifier
F-42 Reiterative Gravimetric Backup Sensors is being converted to 'Basic' Signal Amplifier
Gravimetric Backup Array I is being converted to Signal Amplifier I
Ladar Backup Array I is being converted to Signal Amplifier I
Magnetometric Backup Array I is being converted to Signal Amplifier I
Multi Sensor Backup Array I is being converted to Signal Amplifier I
RADAR Backup Array I is being converted to Signal Amplifier I
Gravimetric Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
Ladar Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
Magnetometric Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
Multi Sensor Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
RADAR Backup Array I Blueprint is being converted to Signal Amplifier I Blueprint
Protected Gravimetric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Protected Ladar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Protected Magnetometric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Protected Multi-Frequency Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Protected RADAR Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Reserve Gravimetric Scanners is being converted to F-89 Compact Signal Amplifier
Reserve Ladar Scanners is being converted to F-89 Compact Signal Amplifier
Reserve Magnetometric Scanners is being converted to F-89 Compact Signal Amplifier
Reserve Multi-Frequency Scanners is being converted to F-89 Compact Signal Amplifier
Reserve RADAR Scanners is being converted to F-89 Compact Signal Amplifier
Secure Gravimetric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Secure Ladar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Secure Magnetometric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Secure Radar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive Gravimetric Backup Sensors is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive Ladar Backup Sensors is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive Magnetometric Backup Sensors is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive Multi-Frequency Backup Sensors is being converted to F-89 Compact Signal Amplifier
F-43 Repetitive RADAR Backup Sensors is being converted to F-89 Compact Signal Amplifier
Shielded Gravimetric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Shielded Ladar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Shielded Magnetometric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Shielded Radar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Surrogate Gravimetric Reserve Array I is being converted to F-89 Compact Signal Amplifier
Surrogate Ladar Reserve Array I is being converted to F-89 Compact Signal Amplifier
Surrogate Magnetometric Reserve Array I is being converted to F-89 Compact Signal Amplifier
Surrogate Multi-Frequency Reserve Array I is being converted to F-89 Compact Signal Amplifier
Surrogate RADAR Reserve Array I is being converted to F-89 Compact Signal Amplifier
Warded Gravimetric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Warded Ladar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Warded Magnetometric Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Warded Radar Backup Cluster I is being converted to F-89 Compact Signal Amplifier
Gravimetric Backup Array II is being converted to Signal Amplifier II
Ladar Backup Array II is being converted to Signal Amplifier II
Magnetometric Backup Array II is being converted to Signal Amplifier II
Multi Sensor Backup Array II is being converted to Signal Amplifier II
RADAR Backup Array II is being converted to Signal Amplifier II
Gravimetric Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
Ladar Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
Magnetometric Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
Multi Sensor Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
RADAR Backup Array II Blueprint is being converted to Signal Amplifier II Blueprint
Gravimetric Firewall is being converted to 'Firewall' Signal Amplifier
Ladar Firewall is being converted to 'Firewall' Signal Amplifier
Magnetometric Firewall is being converted to 'Firewall' Signal Amplifier
Multi Sensor Firewall is being converted to 'Firewall' Signal Amplifier
RADAR Firewall is being converted to 'Firewall' Signal Amplifier
ECCM Projector I is being converted to Remote Sensor Booster I
ECCM Projector I Blueprint is being converted to Remote Sensor Booster I Blueprint
Scattering ECCM Projector I is being converted to Linked Enduring Sensor Booster
Piercing ECCM Emitter I is being converted to Coadjunct Scoped Remote Sensor Booster
Spot Pulsing ECCM I is being converted to F-23 Compact Remote Sensor Booster
Phased Muon ECCM Caster I is being converted to F-23 Compact Remote Sensor Booster
ECCM Projector II is being converted to Remote Sensor Booster II
ECCM Projector II Blueprint is being converted to Remote Sensor Booster II Blueprint
Prototype Sensor Booster is being converted to Alumel-Wired Enduring Sensor Booster
Supplemental Scanning CPU I is being converted to F-90 Compact Sensor Booster
Amplitude Signal Enhancer is being converted to 'Basic' Signal Amplifier
'Acolyth' Signal Booster is being converted to 'Basic' Signal Amplifier
Type-E Discriminative Signal Augmentation is being converted to 'Basic' Signal Amplifier
F-90 Positional Signal Amplifier is being converted to 'Basic' Signal Amplifier
'Mendicant' Signal Booster I is being converted to F-89 Compact Signal Amplifier
Wavelength Signal Enhancer I is being converted to F-89 Compact Signal Amplifier
Type-D Attenuation Signal Augmentation is being converted to F-89 Compact Signal Amplifier
Connected Remote Sensor Booster is being converted to F-23 Compact Remote Sensor Booster
'Boss' Remote Sensor Booster is being converted to 'Bootleg' Remote Sensor Booster
'Entrepreneur' Remote Sensor Booster is being converted to 'Bootleg' Remote Sensor Booster
'Pacifier' Large Remote Armor Repairer is being converted to 'Peace' Large Remote Armor Repairer
'Pacifier' Large Remote Armor Repairer Blueprint is being converted to 'Peace' Large Remote Armor Repairer Blueprint
'Broker' Remote Sensor Dampener I Blueprint is being converted to 'Executive' Remote Sensor Dampener Blueprint
'Tycoon' Remote Tracking Computer Blueprint is being converted to 'Enterprise' Remote Tracking Computer Blueprint
'Economist' Tracking Computer I Blueprint is being converted to 'Marketeer' Tracking Computer Blueprint"""
def main(old, new):
# Open both databases and get their cursors
old_db = sqlite3.connect(os.path.expanduser(old))
old_cursor = old_db.cursor()
new_db = sqlite3.connect(os.path.expanduser(new))
new_cursor = new_db.cursor()
renames = {}
conversions = {}
for x in text.splitlines():
if conversion_phrase in x:
c = x.split(conversion_phrase)
container = conversions
elif rename_phrase in x:
c = x.split(rename_phrase)
container = renames
else:
print("Unknown format: {}".format(x))
sys.exit()
old_name, new_name = c[0], c[1]
old_item, new_item = None, None
if "Blueprint" in old_name or "Blueprint" in new_name:
print("Blueprint: Skipping this line: %s"%x)
continue
# gather item info
new_cursor.execute('SELECT "typeID" FROM "invtypes" WHERE "typeName" = ?', (new_name,))
for row in new_cursor:
new_item = row[0]
break
old_cursor.execute('SELECT "typeID" FROM "invtypes" WHERE "typeName" = ?', (old_name,))
for row in old_cursor:
old_item = row[0]
break
if not old_item:
print("Error finding old item in {} -> {}".format(old_name, new_name))
if not new_item:
print("Error finding new item in {} -> {}".format(old_name, new_name))
if not container.get((new_item,new_name), None):
container[(new_item,new_name)] = []
container[(new_item,new_name)].append((old_item, old_name))
print(" # Renamed items")
for new, old in renames.items():
if len(old) != 1:
print("Incorrect length, key: {}, value: {}".format(new, old))
sys.exit()
old = old[0]
print(" \"{}\": \"{}\",".format(old[1], new[1]))
# Convert modules
print("\n # Converted items")
for new, olds in conversions.items():
for old in olds:
print(" \"{}\": \"{}\",".format(old[1], new[1]))
print()
print()
for new, old in conversions.items():
print(" {}: ( # {}".format(new[0], new[1]))
for item in old:
print(" {}, # {}".format(item[0], item[1]))
print(" ),")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--old", type=str)
parser.add_argument("-n", "--new", type=str)
args = parser.parse_args()
main(args.old, args.new)
| bsmr-eve/Pyfa | scripts/conversion.py | Python | gpl-3.0 | 23,244 |
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
#Python
import logging
import time
import numpy
import vigra
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.utility import Timer
logger = logging.getLogger(__name__)
class OpStreamingHdf5Reader(Operator):
"""
The top-level operator for the data selection applet.
"""
name = "OpStreamingHdf5Reader"
category = "Reader"
# The project hdf5 File object (already opened)
Hdf5File = InputSlot(stype='hdf5File')
# The internal path for project-local datasets
InternalPath = InputSlot(stype='string')
# Output data
OutputImage = OutputSlot()
class DatasetReadError(Exception):
def __init__(self, internalPath):
self.internalPath = internalPath
self.msg = "Unable to open Hdf5 dataset: {}".format( internalPath )
super(OpStreamingHdf5Reader.DatasetReadError, self).__init__( self.msg )
def __init__(self, *args, **kwargs):
super(OpStreamingHdf5Reader, self).__init__(*args, **kwargs)
self._hdf5File = None
def setupOutputs(self):
# Read the dataset meta-info from the HDF5 dataset
self._hdf5File = self.Hdf5File.value
internalPath = self.InternalPath.value
if internalPath not in self._hdf5File:
raise OpStreamingHdf5Reader.DatasetReadError(internalPath)
dataset = self._hdf5File[internalPath]
try:
# Read the axistags property without actually importing the data
axistagsJson = self._hdf5File[internalPath].attrs['axistags'] # Throws KeyError if 'axistags' can't be found
axistags = vigra.AxisTags.fromJSON(axistagsJson)
except KeyError:
# No axistags found.
ndims = len(dataset.shape)
assert ndims != 0, "OpStreamingHdf5Reader: Zero-dimensional datasets not supported."
            assert ndims != 1, "OpStreamingHdf5Reader: 1-D data is not yet supported"
assert ndims <= 5, "OpStreamingHdf5Reader: No support for data with more than 5 dimensions."
axisorders = { 2 : 'yx',
3 : 'zyx',
4 : 'zyxc',
5 : 'tzyxc' }
axisorder = axisorders[ndims]
if ndims == 3 and dataset.shape[2] <= 4:
# Special case: If the 3rd dim is small, assume it's 'c', not 'z'
axisorder = 'yxc'
axistags = vigra.defaultAxistags(axisorder)
assert len(axistags) == len( dataset.shape ),\
"Mismatch between shape {} and axisorder {}".format( dataset.shape, axisorder )
# Configure our slot meta-info
self.OutputImage.meta.dtype = dataset.dtype.type
self.OutputImage.meta.shape = dataset.shape
self.OutputImage.meta.axistags = axistags
# If the dataset specifies a datarange, add it to the slot metadata
if 'drange' in self._hdf5File[internalPath].attrs:
self.OutputImage.meta.drange = tuple( self._hdf5File[internalPath].attrs['drange'] )
total_volume = numpy.prod(numpy.array(self._hdf5File[internalPath].shape))
chunks = self._hdf5File[internalPath].chunks
if not chunks and total_volume > 1e8:
self.OutputImage.meta.inefficient_format = True
logger.warn("This dataset ({}{}) is NOT chunked. "
"Performance for 3D access patterns will be bad!"
.format( self._hdf5File.filename, internalPath ))
if chunks:
self.OutputImage.meta.ideal_blockshape = chunks
def execute(self, slot, subindex, roi, result):
t = time.time()
assert self._hdf5File is not None
# Read the desired data directly from the hdf5File
key = roi.toSlice()
hdf5File = self._hdf5File
internalPath = self.InternalPath.value
timer = None
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Reading HDF5 block: [{}, {}]".format( roi.start, roi.stop ))
timer = Timer()
timer.unpause()
if result.flags.c_contiguous:
hdf5File[internalPath].read_direct( result[...], key )
else:
result[...] = hdf5File[internalPath][key]
        if logger.isEnabledFor(logging.DEBUG):
t = 1000.0*(time.time()-t)
logger.debug("took %f msec." % t)
if timer:
timer.pause()
logger.debug("Completed HDF5 read in {} seconds: [{}, {}]".format( timer.seconds(), roi.start, roi.stop ))
def propagateDirty(self, slot, subindex, roi):
if slot == self.Hdf5File or slot == self.InternalPath:
self.OutputImage.setDirty( slice(None) )
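# Minimal usage sketch (hypothetical names; assumes an existing lazyflow Graph
# `graph` and an already-opened h5py File object `hdf5_file`):
#   op = OpStreamingHdf5Reader(graph=graph)
#   op.Hdf5File.setValue(hdf5_file)
#   op.InternalPath.setValue('exported_data')
#   data = op.OutputImage[:].wait()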
| jakirkham/lazyflow | lazyflow/operators/ioOperators/opStreamingHdf5Reader.py | Python | lgpl-3.0 | 5,866 |
"""--------------------------------------------------------------------
COPYRIGHT 2016 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed SI Vector Platform is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file vector_teleop.py
\brief This module contains a class for teleoperating the vector
platform with a joystick controller
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
from utils import *
from system_defines import *
from vector_msgs.msg import *
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
from std_msgs.msg import Bool,Float64
import rospy
import sys
import math
"""
mapping for controller order is dtz_request, powerdown_request, standby_request, tractor_request, balance_request, audio_request,
deadman_input, manual_ovvrd_input, twist_linear_x_input, twist_linear_y_input, twist_angular_z_input
"""
MAP_DTZ_IDX = 0
MAP_PWRDWN_IDX = 1
MAP_STANDBY_IDX = 2
MAP_TRACTOR_IDX = 3
MAP_BALANCE_IDX = 4
MAP_AUDIO_IDX = 5
MAP_REC_GOAL_IDX = 6
MAP_DEADMAN_IDX = 7
MAP_MAN_OVVRD_IDX = 8
NUMBER_OF_MOMENTARY_INPUTS = 9
MAP_TWIST_LIN_X_IDX = 0
MAP_TWIST_LIN_Y_IDX = 1
MAP_TWIST_ANG_Z_IDX = 2
NUMBER_OF_AXIS_INPUTS = 3
class VectorTeleop:
def __init__(self):
self.is_sim = rospy.get_param('~sim',False)
if (False == self.is_sim):
"""
Subscribe to the configuration message
"""
self.config_updated = False
rospy.Subscriber("/vector/feedback/active_configuration", Configuration, self._update_configuration_limits)
start_time = rospy.get_time()
while ((rospy.get_time() - start_time) < 10.0) and (False == self.config_updated):
rospy.sleep(0.05)
if (False == self.config_updated):
rospy.logerr("Timed out waiting for Vector feedback topics make sure the driver is running")
sys.exit(0)
return
else:
            self.x_vel_limit_mps = rospy.get_param('~sim_teleop_x_vel_limit_mps',0.5)
            self.y_vel_limit_mps = rospy.get_param('~sim_teleop_y_vel_limit_mps',0.5)
self.yaw_rate_limit_rps = rospy.get_param('~sim_teleop_yaw_rate_limit_rps',0.5)
self.accel_lim = rospy.get_param('~sim_teleop_accel_lim',0.5)
self.yaw_accel_lim = rospy.get_param('~sim_teleop_yaw_accel_lim',1.0)
default_ctrl_map = dict({'momentary':[[{'is_button':True,'index':4,'set_val':1}],
[{'is_button':True,'index':8,'set_val':1}],
[{'is_button':True,'index':1,'set_val':1}],
[{'is_button':True,'index':2,'set_val':1}],
[{'is_button':True,'index':0,'set_val':1}],
[{'is_button':False,'index':6,'invert_axis':False,'set_thresh':0.9}],
[{'is_button':False,'index':7,'invert_axis':True,'set_thresh':0.9}],
[{'is_button':False,'index':2,'invert_axis':True,'set_thresh':0.9}],
[{'is_button':False,'index':5,'invert_axis':True,'set_thresh':0.9}]],
'axis_range':[{'index':1,'invert_axis':False},
{'index':0,'invert_axis':False},
{'index':3,'invert_axis':False}]})
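        # Example (hypothetical): to move the deadman switch from the left trigger
        # axis to joystick button 5, override the ~controller_mapping parameter so
        # that the entry at MAP_DEADMAN_IDX becomes
        # [{'is_button': True, 'index': 5, 'set_val': 1}].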
"""
Get the mapping for the various commands, defaults are xbox360 wireless
"""
self.ctrl_map = rospy.get_param('~controller_mapping',default_ctrl_map)
"""
Initialize the debounce logic states
"""
self.db_cnt = [0] * NUMBER_OF_MOMENTARY_INPUTS
self.button_state = [False] * NUMBER_OF_MOMENTARY_INPUTS
self.axis_value = [0.0] * NUMBER_OF_AXIS_INPUTS
        self.send_cmd_none = False
        self.goalrecorded = False  # must be initialized before the first goal-record button press
self.no_motion_commands = True
self.last_motion_command_time = 0.0
self.last_joy = rospy.get_time()
self.cfg_cmd = ConfigCmd()
self.cfg_pub = rospy.Publisher('/vector/gp_command', ConfigCmd, queue_size=10)
self.goalrecorder_pub = rospy.Publisher('/vector/record_pose',Bool, queue_size=10)
self.motion_cmd = Twist()
self.limited_cmd = Twist()
self.motion_pub = rospy.Publisher('/vector/teleop/cmd_vel', Twist, queue_size=10)
self.override_pub = rospy.Publisher("/vector/manual_override/cmd_vel",Twist, queue_size=10)
rospy.Subscriber('/joy', Joy, self._vector_teleop)
def _update_configuration_limits(self,config):
self.x_vel_limit_mps = config.teleop_x_vel_limit_mps
self.y_vel_limit_mps = config.teleop_y_vel_limit_mps
self.yaw_rate_limit_rps = config.teleop_yaw_rate_limit_rps
self.accel_lim = config.teleop_accel_limit_mps2
self.yaw_accel_lim = config.teleop_yaw_accel_limit_rps2
self.config_updated = True
def _parse_joy_input(self,joyMessage):
raw_button_states = [True] * NUMBER_OF_MOMENTARY_INPUTS
self.button_state = [False] * NUMBER_OF_MOMENTARY_INPUTS
for i in range(NUMBER_OF_MOMENTARY_INPUTS):
inputs_for_req = self.ctrl_map['momentary'][i]
for item in inputs_for_req:
if item['is_button']:
if item['set_val'] == joyMessage.buttons[item['index']]:
raw_button_states[i] &= True
else:
raw_button_states[i] = False
else:
temp = joyMessage.axes[item['index']]
if (item['invert_axis']):
temp *= -1.0
if (temp >= item['set_thresh']):
raw_button_states[i] &= True
else:
raw_button_states[i] = False
if (True == raw_button_states[i]):
self.db_cnt[i]+=1
if (self.db_cnt[i] > 10):
self.db_cnt[i] = 10
self.button_state[i] = True
else:
self.button_state[i] = False
self.db_cnt[i] = 0
self.axis_value = [0.0] * NUMBER_OF_AXIS_INPUTS
for i in range(NUMBER_OF_AXIS_INPUTS):
axis_input_map = self.ctrl_map['axis_range'][i]
temp = joyMessage.axes[axis_input_map['index']]
if (axis_input_map['invert_axis']):
temp *= -1.0
self.axis_value[i] = temp
def _vector_teleop(self, joyMessage):
self._parse_joy_input(joyMessage)
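# Goal recording is edge triggered: one record_pose message is published when the
# button first reads pressed, then the flag blocks republishing until it is released.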
if self.button_state[MAP_REC_GOAL_IDX] == 1:
if (False == self.goalrecorded):
temp = Bool()
temp.data = True
self.goalrecorder_pub.publish(temp)
self.goalrecorded= True
else:
self.goalrecorded= False
if self.button_state[MAP_DTZ_IDX]:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = DTZ_REQUEST
elif self.button_state[MAP_PWRDWN_IDX]:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = STANDBY_REQUEST
elif self.button_state[MAP_STANDBY_IDX]:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = STANDBY_REQUEST
elif self.button_state[MAP_TRACTOR_IDX]:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE'
self.cfg_cmd.gp_param = TRACTOR_REQUEST
else:
self.cfg_cmd.gp_cmd = 'GENERAL_PURPOSE_CMD_NONE'
self.cfg_cmd.gp_param = 0
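# A mode request is published while gp_cmd is set; the following pass publishes a
# single GENERAL_PURPOSE_CMD_NONE so the base does not keep re-executing the request.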
if ('GENERAL_PURPOSE_CMD_NONE' != self.cfg_cmd.gp_cmd):
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
self.cfg_cmd.header.seq += 1
self.send_cmd_none = True
elif (True == self.send_cmd_none):
self.cfg_cmd.header.stamp = rospy.get_rostime()
self.cfg_pub.publish(self.cfg_cmd)
self.cfg_cmd.header.seq += 1
self.send_cmd_none = False
elif (False == self.send_cmd_none):
if self.button_state[MAP_DEADMAN_IDX]:
self.motion_cmd.linear.x = (self.axis_value[MAP_TWIST_LIN_X_IDX] * self.x_vel_limit_mps)
self.motion_cmd.linear.y = (self.axis_value[MAP_TWIST_LIN_Y_IDX] * self.y_vel_limit_mps)
self.motion_cmd.angular.z = (self.axis_value[MAP_TWIST_ANG_Z_IDX] * self.yaw_rate_limit_rps)
self.last_motion_command_time = rospy.get_time()
else:
self.motion_cmd.linear.x = 0.0
self.motion_cmd.linear.y = 0.0
self.motion_cmd.angular.z = 0.0
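# Slew-limit the commanded twist with the configured linear/yaw accelerations so the
# platform ramps toward the joystick command instead of stepping to it.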
dt = rospy.get_time() - self.last_joy
self.last_joy = rospy.get_time()
if (dt >= 0.01):
self.limited_cmd.linear.x = slew_limit(self.motion_cmd.linear.x,
self.limited_cmd.linear.x,
self.accel_lim, dt)
self.limited_cmd.linear.y = slew_limit(self.motion_cmd.linear.y,
self.limited_cmd.linear.y,
self.accel_lim, dt)
self.limited_cmd.angular.z = slew_limit(self.motion_cmd.angular.z,
self.limited_cmd.angular.z,
self.yaw_accel_lim, dt)
if ((rospy.get_time() - self.last_motion_command_time) < 2.0):
self.motion_pub.publish(self.limited_cmd)
if self.button_state[MAP_DEADMAN_IDX] and self.button_state[MAP_MAN_OVVRD_IDX]:
self.override_pub.publish(self.motion_cmd)
| StanleyInnovation/vector_v1 | vector_common/vector_ros/src/vector/vector_teleop.py | Python | bsd-3-clause | 12,505 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
spike – a package manager running on top of git
Copyright © 2012, 2013, 2014 Mattias Andrée ([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import os
from subprocess import Popen
from library.libspike import *
from auxiliary.argparser import *
from auxiliary.printhacks import *
SPIKE_VERSION = '0.1'
'''
This version of spike
'''
SPIKE_DEBUG = os.getenv('SPIKE_DEBUG', '') == 'yes'
'''
Whether spike has been started in debug mode
'''
class Spike():
'''
Spike is your number one package manager
'''
def __init__(self):
'''
Constructor
'''
self.version = SPIKE_VERSION
self.execprog = 'spike'
self.prog = 'spike'
def mane(self, args):
'''
Run this method to invoke Spike as if executed from the shell
Exit values: 0 - Successful
1 - Option use error
2 - Non-option argument use error
3 - -h(--help) was used
4 - Invalid option argument
5 - Root does not exist
6 - Scroll does not exist
7 - Pony is not installed
8 - Pony conflict
9 - Dependency does not exist
10 - File is already claimed
11 - File was claimed for another pony
12 - File does not exist
13 - File already exists
14 - Information field is not defined
15 - Starting interactive mode from pipe
16 - Compile error
17 - Installation error, usually because --private or root is needed
18 - Private installation is not supported
19 - Non-private installation is not supported
20 - Scroll error
21 - Pony ride error
22 - Proofread found scroll error
23 - File access denied
24 - Cannot pull git repository
25 - Cannot checkout git repository
26 - File is of wrong type, normally a directory or regular file when the other is expected
27 - Corrupt database
28 - Pony is required by another pony
29 - Circular make dependency
254 - User aborted
255 - Unknown error
@param args:list<str> Command line arguments, including invoked program alias ($0)
'''
self.execprog = args[0].split('/')[-1]
usage = self.prog + ' [command [option]... [FILE... | FILE... SCROLL | SCROLL...]]'
usage = usage.replace('spike', '\033[35m' 'spike' '\033[00m')
usage = usage.replace('command', '\033[33m' 'command' '\033[00m')
usage = usage.replace('option', '\033[33m' 'option' '\033[00m')
usage = usage.replace('FILE', '\033[04m' 'FILE' '\033[00m')
usage = usage.replace('SCROLL', '\033[04m' 'SCROLL' '\033[00m')
if tty:
usage = usage.replace('\033[04m', '\033[34m')
usage = usage.replace('\033[', '\0')
for sym in ('[', ']', '(', ')', '|', '...', '*'):
usage = usage.replace(sym, '\033[02m' + sym + '\033[22m')
usage = usage.replace('\0', '\033[')
opts = ArgParser('spike', 'a package manager running on top of git', usage,
'spike is used to spike your system with new ponies or new versions\n'
'of ponies (known on other systems as packages). spike has the capability\n'
'of not only installing ponies, but also archiving an installation and\n'
'simply rolling back to it in case the system breaks, a must-have feature\n'
'on unstable OSes. spike uses so called scrolls to install ponies,\n'
'these are written, for maximum simplicity and portability, in Python 3\n'
'and a collection of these are distributed with spike and updated when\n'
'spike is updated. But you can add scroll repositories to spike on your\n'
'local installation.', tty)
opts.add_argumentless(['-v', '--version'], help = 'Print program name and version')
opts.add_argumentless(['-h', '--help'], help = 'Print this help')
opts.add_argumentless(['-c', '--copyright'], help = 'Print copyright information')
opts.add_argumentless(['-B', '--bootstrap'], help = 'Update spike and scroll repositories\n'
'slaves: [--no-verify]')
opts.add_argumentless(['-F', '--find'], help = 'Find a scroll either by name or by ownership\n'
'slaves: [--owner | --written=]')
opts.add_argumentless(['-W', '--write'], help = 'Install a pony (package) from scroll\n'
'slaves: [--pinpal= | --private] [--asdep | --asexplicit] [--nodep] [--force] [--shred]')
opts.add_argumentless(['-U', '--update'], help = 'Update to new versions of the installed ponies\n'
'slaves: [--pinpal= | --private] [--ignore=]... [--shred]')
opts.add_argumentless(['-E', '--erase'], help = 'Uninstall a pony\n'
'slaves: [--pinpal= | --private] [--shred]')
opts.add_argumentless([ '--demote'], help = 'Demote pony to a dependency\n'
'slaves: [--private]')
opts.add_argumentless([ '--promote'], help = 'Promote pony to explicitly installed\n'
'slaves: [--private]')
opts.add_argumented( ['-X', '--ride'], arg = 'SCROLL', help = 'Execute a scroll after best effort\n'
'slaves: [--private]')
opts.add_argumentless(['-R', '--read'], help = 'Get scroll information\n'
'slaves: (--list | [--info=...] [--written=])')
opts.add_argumentless(['-C', '--claim'], help = 'Claim one or more files as owned by a pony\n'
'slaves: [--recursive | --entire] [--private] [--force]')
opts.add_argumentless(['-D', '--disclaim'], help = 'Disclaim one or more files as owned by a pony\n'
'slaves: [--recursive] [--private]')
opts.add_argumented( ['-A', '--archive'], arg = 'ARCHIVE', help = 'Create an archive of everything that is currently installed.\n'
'slaves: [--scrolls]')
opts.add_argumented( ['--restore-archive'], arg = 'ARCHIVE', help = 'Roll back to an archived state of the system\n'
'slaves: [--shared | --full | --old] [--downgrade | --upgrade] [--shred]')
opts.add_argumentless(['-N', '--clean'], help = 'Uninstall unneeded ponies\n'
'slaves: [--private] [--shred]')
opts.add_argumentless(['-P', '--proofread'], help = 'Verify that a scroll is correct')
opts.add_argumentless(['-S', '--example-shot'], help = 'Display example shot for scrolls\n'
'slaves: [--viewer=] [--all-at-once]')
opts.add_argumentless(['-I', '--interactive'], help = 'Start in interactive graphical terminal mode\n'
'(supports installation and uninstallation only)\n'
'slaves: [--shred]')
opts.add_argumentless(['-3', '--sha3sum'], help = 'Calculate the SHA3 checksums for files\n'
'(do not expect files to be listed in order)')
opts.add_argumentless(['-o', '--owner'], help = 'Find owner pony for file')
opts.add_argumented( ['-w', '--written'], arg = 'boolean', help = 'Search only for installed (\'yes\' or \'y\') or not installed (\'no\' or \'n\') ponies')
opts.add_argumented( [ '--pinpal'], arg = 'ROOT', help = 'Mounted system for which to do installation or uninstallation')
opts.add_argumentless(['-u', '--private'], help = 'Private pony installation')
opts.add_argumentless([ '--asdep'], help = 'Install pony as implicitly installed (a dependency)')
opts.add_argumentless([ '--asexplicit'], help = 'Install pony as explicitly installed (no longer a dependency)')
opts.add_argumentless([ '--nodep'], help = 'Do not install dependencies')
opts.add_argumentless([ '--force'], help = 'Ignore file claims')
opts.add_argumentless(['-i', '--ignore'], help = 'Ignore update of a pony')
opts.add_argumentless(['-l', '--list'], help = 'List files claimed (done at installation) for a pony')
opts.add_argumented( ['-f', '--info'], arg = 'FIELD', help = 'Retrieve a specific scroll information field')
opts.add_argumentless([ '--recursive'], help = 'Recursively claim or disclaim directories')
opts.add_argumentless([ '--entire'], help = 'Recursively claim directories and their future content')
opts.add_argumentless(['-s', '--scrolls'], help = 'Do only archive scrolls, no installed files')
opts.add_argumentless([ '--shared'], help = 'Reinstall only ponies that are currently installed and archived')
opts.add_argumentless([ '--full'], help = 'Uninstall ponies that are not archived')
opts.add_argumentless([ '--old'], help = 'Reinstall only ponies that are currently not installed')
opts.add_argumentless([ '--downgrade'], help = 'Do only perform pony downgrades')
opts.add_argumentless([ '--upgrade'], help = 'Do only perform pony upgrades')
opts.add_argumentless([ '--shred'], help = 'Perform secure removal with `shred` when removing old files')
opts.add_argumentless([ '--no-verify'], help = 'Skip verification of signatures')
opts.add_argumentless(['-a', '--all-at-once'], help = 'Display all example shots in one single process instance')
opts.add_argumented( [ '--viewer'], arg = 'VIEWER', help = 'Select image viewer for example shots')
if not opts.parse(args):
printerr(self.execprog + ': use of unrecognised option')
exit(1)
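# Map each short option to its long form so diagnostics can name both spellings.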
longmap = {}
longmap['-v'] = '--version'
longmap['-h'] = '--help'
longmap['-c'] = '--copyright'
longmap['-B'] = '--bootstrap'
longmap['-F'] = '--find'
longmap['-W'] = '--write'
longmap['-U'] = '--update'
longmap['-E'] = '--erase'
longmap['-X'] = '--ride'
longmap['-R'] = '--read'
longmap['-C'] = '--claim'
longmap['-D'] = '--disclaim'
longmap['-A'] = '--archive'
longmap['-N'] = '--clean'
longmap['-P'] = '--proofread'
longmap['-S'] = '--example-shot'
longmap['-I'] = '--interactive'
longmap['-3'] = '--sha3sum'
longmap['-o'] = '--owner'
longmap['-w'] = '--written'
longmap['-u'] = '--private'
longmap['-i'] = '--ignore'
longmap['-l'] = '--list'
longmap['-f'] = '--info'
longmap['-s'] = '--scrolls'
longmap['-a'] = '--all-at-once'
exclusives = set()
for opt in 'vhcBFWUEXRCDANPSI3':
exclusives.add('-' + opt)
exclusives.add('--restore-archive')
exclusives.add('--demote')
exclusives.add('--promote')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
for opt in opts.opts:
if (opt != '-i') and (opt != '-f'): # --ignore, --info
if (opts.opts[opt] is not None) and (len(opts.opts[opt]) > 1):
option = opt
if option in longmap:
option += '(' + longmap[option] + ')'
printerr('%s: %s is used multiple times' % (self.execprog, option))
exit(1)
allowed = set()
for opt in exclusives:
if opts.opts[opt] is not None:
allowed.add(opt)
break
exclusives = set()
exit_value = 0
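# Helper: flatten repeatable comma-separated option values (e.g. -i a,b -i c -> ['a', 'b', 'c']).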
def comma_split(values):
if values is None:
return None
rc = []
for value in values:
rc += value.split(',')
return rc
try:
if opts.opts['-v'] is not None:
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
self.print_version()
elif opts.opts['-h'] is not None:
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
opts.help()
exit_value = 3
elif opts.opts['-c'] is not None:
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
self.print_copyright()
elif opts.opts['-3'] is not None:
opts.test_allowed(self.execprog, allowed, longmap, True)
LibSpike.initialise()
exit_value = self.sha3sum(opts.files)
elif opts.opts['-B'] is not None:
allowed.add('--no-verify')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
LibSpike.initialise()
exit_value = self.bootstrap(opts.opts['--no-verify'] is None)
elif opts.opts['-F'] is not None:
exclusives.add('-o')
exclusives.add('-w')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
allowed.add('-o')
allowed.add('-w')
opts.test_allowed(self.execprog, allowed, longmap, True)
if opts.opts['-w'] is not None:
if opts.opts['-w'][0] not in ('y', 'yes', 'n', 'no'):
printerr(self.execprog + ': only \'yes\', \'y\', \'no\' and \'n\' are allowed for -w(--written)')
exit(4)
LibSpike.initialise()
exit_value = self.find_scroll(opts.files,
installed = opts.opts['-w'][0][0] == 'y',
notinstalled = opts.opts['-w'][0][0] == 'n')
elif opts.opts['-o'] is not None:
opts.test_files(self.execprog, 1, None, True)
LibSpike.initialise()
exit_value = self.find_owner(opts.files)
else:
LibSpike.initialise()
exit_value = self.find_scroll(opts.files, installed = True, notinstalled = True)
elif opts.opts['-W'] is not None:
exclusives.add('--pinpal')
exclusives.add('-u')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
exclusives = set()
exclusives.add('--asdep')
exclusives.add('--asexplicit')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
allowed.add('--pinpal')
allowed.add('-u')
allowed.add('--asdep')
allowed.add('--asexplicit')
allowed.add('--nodep')
allowed.add('--force')
allowed.add('--shred')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 1, None, True)
LibSpike.initialise(shred = opts.opts['--shred'] is not None)
exit_value = self.write(opts.files,
root = opts.opts['--pinpal'][0] if opts.opts['--pinpal'] is not None else '/',
private = opts.opts['-u'] is not None,
explicitness = 1 if opts.opts['--asexplicit'] is not None else
-1 if opts.opts['--asdep'] is not None else 0,
nodep = opts.opts['--nodep'] is not None,
force = opts.opts['--force'] is not None)
elif opts.opts['-U'] is not None:
allowed.add('--pinpal')
allowed.add('-i')
allowed.add('-u')
allowed.add('--shred')
exclusives.add('--pinpal')
exclusives.add('-u')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
LibSpike.initialise(shred = opts.opts['--shred'] is not None)
exit_value = self.update(root = opts.opts['--pinpal'][0] if opts.opts['--pinpal'] is not None else '/',
ignores = comma_split(opts.opts['-i']) if opts.opts['-i'] is not None else [],
private = opts.opts['-u'] is not None)
elif opts.opts['-E'] is not None:
exclusives.add('--pinpal')
exclusives.add('-u')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
allowed.add('--pinpal')
allowed.add('-u')
allowed.add('--shred')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 1, None, True)
LibSpike.initialise(shred = opts.opts['--shred'] is not None)
exit_value = self.erase(opts.files,
root = opts.opts['--pinpal'][0] if opts.opts['--pinpal'] is not None else '/',
private = opts.opts['-u'] is not None)
elif opts.opts['-X'] is not None:
allowed.add('-u')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 1, 1, True)
LibSpike.initialise()
exit_value = self.ride(opts.files[0],
private = opts.opts['-u'] is not None)
#elif opts.opts['--demote'] is not None: ### TODO: implement demote
# allowed.add('-u')
# opts.test_allowed(self.execprog, allowed, longmap, True)
# opts.test_files(self.execprog, 1, None, True)
# LibSpike.initialise()
# exit_value = self.demote(opts.files,
# private = opts.opts['-u'] is not None)
#elif opts.opts['--promote'] is not None: ### TODO: implement promote
# allowed.add('-u')
# opts.test_allowed(self.execprog, allowed, longmap, True)
# opts.test_files(self.execprog, 1, None, True)
# LibSpike.initialise()
# exit_value = self.promote(opts.files,
# private = opts.opts['-u'] is not None)
elif opts.opts['-R'] is not None:
exclusives.add('-l')
exclusives.add('-f')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
allowed.add('-l')
allowed.add('-f')
if opts.opts['-l'] is None:
allowed.add('-w')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 1, None, True)
if opts.opts['-l'] is not None:
exit_value = self.read_files(opts.files)
else:
if opts.opts['-w'] is not None:
if opts.opts['-w'][0] not in ('y', 'yes', 'n', 'no'):
printerr(self.execprog + ': only \'yes\', \'y\', \'no\' and \'n\' are allowed for -w(--written)')
exit(4)
LibSpike.initialise()
exit_value = self.read_info(opts.files, field = comma_split(opts.opts['-f']),
installed = opts.opts['-w'][0][0] == 'y',
notinstalled = opts.opts['-w'][0][0] == 'n')
else:
LibSpike.initialise()
exit_value = self.read_info(opts.files, field = comma_split(opts.opts['-f']))
elif opts.opts['-C'] is not None:
exclusives.add('--recursive')
exclusives.add('--entire')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
allowed.add('--recursive')
allowed.add('--entire')
allowed.add('-u')
allowed.add('--force')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 2, None, True)
LibSpike.initialise()
exit_value = self.claim(opts.files[:-1], opts.files[-1],
recursiveness = 1 if opts.opts['--recursive'] is not None else
2 if opts.opts['--entire'] is not None else 0,
private = opts.opts['-u'] is not None,
force = opts.opts['--force'] is not None)
elif opts.opts['-D'] is not None:
allowed.add('--recursive')
allowed.add('-u')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 2, None, True)
LibSpike.initialise()
exit_value = self.disclaim(opts.files[:-1], opts.files[-1],
recursive = opts.opts['--recursive'] is not None,
private = opts.opts['-u'] is not None)
elif opts.opts['-A'] is not None:
allowed.add('-s')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
LibSpike.initialise()
exit_value = self.archive(opts.opts['-A'][0], scrolls = opts.opts['-s'] is not None)
elif opts.opts['--restore-archive'] is not None:
exclusives.add('--shared')
exclusives.add('--full')
exclusives.add('--old')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
exclusives = set()
exclusives.add('--downgrade')
exclusives.add('--upgrade')
opts.test_exclusiveness(self.execprog, exclusives, longmap, True)
allowed.add('--shared')
allowed.add('--full')
allowed.add('--old')
allowed.add('--downgrade')
allowed.add('--upgrade')
allowed.add('--shred')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
LibSpike.initialise(shred = opts.opts['--shred'] is not None)
exit_value = self.rollback(opts.opts['--restore-archive'][0],
keep = opts.opts['--full'] is None,
skip = opts.opts['--shared'] is not None,
gradeness = -1 if opts.opts['--downgrade'] is not None else
1 if opts.opts['--upgrade'] is not None else 0)
elif opts.opts['-P'] is not None:
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 1, None, True)
LibSpike.initialise()
exit_value = self.proofread(opts.files)
elif opts.opts['-N'] is not None:
allowed.add('--private')
allowed.add('--shred')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 1, None, True)
LibSpike.initialise(shred = opts.opts['--shred'] is not None)
exit_value = self.clean(private = opts.opts['--private'] is not None)
elif opts.opts['-S'] is not None:
allowed.add('--viewer')
allowed.add('-a')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 1, None, True)
env_display = os.environ.get('DISPLAY')
default_viewer = 'xloadimage' if (env_display is not None) and env_display.startswith(':') else 'jfbview'
LibSpike.initialise()
exit_value = self.example_shot(opts.files,
viewer = opts.opts['--viewer'][0] if opts.opts['--viewer'] is not None else default_viewer,
all_at_once = opts.opts['-a'] is not None)
elif opts.opts['-I'] is not None:
allowed.add('--shred')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
LibSpike.initialise(shred = opts.opts['--shred'] is not None)
exit_value = self.interactive()
else:
allowed.add('--shred')
opts.test_allowed(self.execprog, allowed, longmap, True)
opts.test_files(self.execprog, 0, 0, True)
LibSpike.initialise(shred = opts.opts['--shred'] is not None)
exit_value = self.interactive()
except Exception as err:
exit_value = 255
printerr('%s: %s' % (self.execprog, str(err)))
if SPIKE_DEBUG:
raise err
if exit_value == 27:
printerr('%s: \033[01;31m%s\033[00m' % (self.execprog, 'corrupt database'))
LibSpike.terminate()
exit(exit_value)
def print_version(self):
'''
Prints spike followed by a blank space and the version of spike to stdout
'''
print('spike ' + self.version)
def print_copyright(self):
'''
Prints spike copyright notice to stdout
'''
print('spike – a package manager running on top of git\n'
'\n'
'Copyright © 2012, 2013, 2014 Mattias Andrée ([email protected])\n'
'\n'
'This program is free software: you can redistribute it and/or modify\n'
'it under the terms of the GNU General Public License as published by\n'
'the Free Software Foundation, either version 3 of the License, or\n'
'(at your option) any later version.\n'
'\n'
'This program is distributed in the hope that it will be useful,\n'
'but WITHOUT ANY WARRANTY; without even the implied warranty of\n'
'MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n'
'GNU General Public License for more details.\n'
'\n'
'You should have received a copy of the GNU General Public License\n'
'along with this program. If not, see <http://www.gnu.org/licenses/>.')
def bootstrap(self, verify):
'''
Update the spike and the scroll archives
@param verify:bool Whether to verify signatures
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, int)→void
Feed a directory path and 0 when a directory is enqueued for bootstrapping.
Feed a directory path and 1 when a directory bootstrap process is beginning.
Feed a directory path and 2 when a directory bootstrap process has ended.
'''
def __init__(self):
self.colour_map = (3, 4, 2)
self.message_map = ('QUEUED', 'WORKING', 'DONE')
def __call__(self, directory, state):
print('\033[01;3%im%s [%s]\033[00m' % (self.colour_map[state], directory, self.message_map[state]))
return LibSpike.bootstrap(Agg(), verify)
def find_scroll(self, patterns, installed = True, notinstalled = True):
'''
Search for a scroll
@param patterns:list<str> Regular expression search patterns
@param installed:bool Look for installed packages
@param notinstalled:bool Look for not installed packages
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str)→void
Feed a scroll when one matching one of the patterns has been found.
'''
def __init__(self):
pass
def __call__(self, found):
print(found)
return LibSpike.find_scroll(Agg(), patterns, installed, notinstalled)
def find_owner(self, files):
'''
Search for a file's owner pony, includes only installed ponies
@param files:list<string> Files for which to do lookup
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, str?)→void
Feed a file path and a scroll when an owner has been found.
Feed a file path and `None` when it has been determined that there is no owner.
'''
def __init__(self):
pass
def __call__(self, filepath, owner):
if owner is None:
print('%s has no owner\n' % filepath)
else:
print('%s is owned by %s\n' % (filepath, owner))
return LibSpike.find_owner(Agg(), files)
def write(self, scrolls, root = '/', private = False, explicitness = 0, nodep = False, force = False):
'''
Install ponies from scrolls
@param scrolls:list<str> Scroll to install
@param root:str Mounted filesystem to which to perform installation
@param private:bool Whether to install as user private
@param explicitness:int -1 for install as dependency, 1 for install as explicit, and 0 for explicit if not previously as dependency
@param nodep:bool Whether to ignore dependencies
@param force:bool Whether to ignore file claims
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str?, int, [*])→(void|bool|str|int?)
Feed a scroll (`None` only at state 0, 3, 6, 7 and 9) and a state (can be looped) during the process of a scroll.
The states are: 0 - inspecting installed scrolls
1 - proofreading
2 - scroll added because of being updated
3 - resolving conflicts
4 - scroll added because of dependency. Additional parameters: requirers:list<str>
5 - scroll removed due to being replaced. Additional parameters: replacer:str
6 - verify installation. Additional parameters: fresh_installs:list<str>, reinstalls:list<str>, update:list<str>, downgrading:list<str>, skipping:list<str>
Return: accepted:bool
7 - inspecting non-install scrolls for providers
8 - select provider pony. Additional parameters: options:list<str>
Return: select provider:str? `None` if aborted
9 - select when to build ponies which require interaction. Additional parameters: interactive:list<str>, allowed:int
Return: when:excl-flag? `None` if aborted
10 - fetching source. Additional parameters: source:str, progress state:int, progress end:int
11 - verifying source. Additional parameters: progress state:int, progress end:int
12 - compiling
13 - file conflict check: Additional parameters: progress state:int, progress end:int
14 - installing files: Additional parameters: progress state:int, progress end:int
when:excl-flag values: 0 - Build whenever
1 - Build early
2 - Build early and fetch separately
3 - Build late
allowed:int values: The union of all `1 << when` with allowed `when`
'''
def __init__(self):
self.update_add = set()
self.dep_add = {}
self.replace_remove = {}
self.scrls = []
for i in range(6):
self.scrls.append([0, {}])
self.scrls[0] = self.scrls[1]
def __call__(self, scroll, state, *args):
if type(self) == Spike:
if scroll == 'rarity':
scroll = scroll + '♥'
elif scroll.startswith('rarity='):
scroll = scroll.replace('=', '♥=')
if state == 0:
print('Inspecting installed scrolls')
elif state == 1:
print('Proofreading: %s' % scroll)
elif state == 2:
self.update_add.add(scroll[:(scroll + '=').find('=')])
elif state == 3:
print('Resolving conflicts')
elif state == 4:
if scroll in self.dep_add:
self.dep_add[scroll] += args[0]
else:
self.dep_add[scroll] = args[0]
elif state == 5:
if scroll in self.replace_remove:
self.replace_remove[scroll] += args[0]
else:
self.replace_remove[scroll] = args[0]
elif state == 6:
fresh_installs, reinstalls, update, downgrading, skipping = args
for scrl in skipping:
print('Skipping %s' % scrl)
for scrl in fresh_installs:
print('Installing %s' % scrl)
for scrl in reinstalls:
print('Reinstalling %s' % scrl)
for scrl in update:
if scrl[:scrl.find('=')] not in self.update_add:
print('Explicitly updating %s' % scrl)
for scrl in update:
if scrl[:scrl.find('=')] in self.update_add:
print('Updating %s' % scrl)
for dep in self.dep_add:
print('Adding %s, required by: %s' % (dep, ', '.join(self.dep_add[dep])))
for replacee in self.replace_remove:
print('Replacing %s with %s' % (replacee, ', '.join(self.replace_remove[replacee])))
for scrl in downgrading:
print('Downgrading %s' % scrl)
while True:
print('\033[01mContinue? (y/n)\033[00m')
answer = input().lower()
if answer.startswith('y') or answer.startswith('n'):
return answer.startswith('y')
elif state == 7:
print('Inspecting scroll repository for providers')
elif state == 8:
print('\033[01mSelect provider for virtual pony: %s\033[00m' % scroll)
i = 0
for prov in args[0]:
i += 1
print('%i: %s' % (i, prov))
print('> ', end='')
sel = input()
try:
sel = int(sel)
if 1 <= sel <= len(args[0]):
return args[0][sel - 1]
except:
pass
return None
elif state == 9:
print('There are some scrolls that require pony interaction to be built:')
for scroll in args[0]:
print(' %s' % scroll)
allowed = args[1]
print('\033[01mWhen do you want to build scrolls that require interaction:\033[00m')
if (allowed & (1 << 0)) != 0:
print(' w - Whenever, I will not leave my precious magic box')
if (allowed & (1 << 1)) != 0:
print(' e - Before all other scrolls')
if (allowed & (1 << 2)) != 0:
print(' E - Before all other scrolls, and download others\' sources afterwards')
if (allowed & (1 << 3)) != 0:
print(' l - After all other scrolls')
print(' a - Abort!')
while True:
when = input()
if (allowed & (1 << 0)) != 0:
if when == 'w' or when == 'W':
return 0
if (allowed & (1 << 1)) != 0:
if when == 'e':
return 1
if (allowed & (1 << 2)) != 0:
if when == 'E':
return 2
if (allowed & (1 << 3)) != 0:
if when == 'l' or when == 'L':
return 3
if when == 'a':
return None
print('\033[01mInvalid option!\033[00m')
else:
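# States 10-14 render per-scroll progress bars in place: each state keeps its own
# scroll -> line-index table, the cursor is moved up to the scroll's line, the bar is
# reprinted, and the cursor is moved back down afterwards.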
if scroll not in self.scrls[state - 10][1]:
self.scrls[state - 10][0] += 1
self.scrls[state - 10][1][scroll] = self.scrls[state - 10][0]
(scrli, scrln) = (self.scrls[state - 10][1][scroll], self.scrls[state - 10][0])
if scrli != scrln:
if state != 12:
print('\033[%iA' % (scrln - scrli))
if state == 10:
(source, progress, end) = args
bar = '[\033[01;3%im%s\033[00m]'
bar %= (2, 'DONE') if progress == end else (3, '%2.1f' % (progress / end))
print('[%s] (%i/%i) Downloading %s: %s' % (bar, scrli, scrln, scroll, source))
elif state == 11:
(progress, end) = args
bar = '[\033[01;3%im%s\033[00m]'
bar %= (2, 'DONE') if progress == end else (3, '%2.1f' % (progress / end))
print('[%s] (%i/%i) Verifying %s' % (bar, scrli, scrln, scroll))
elif state == 12:
print('(%i/%i) Compiling %s' % (scrli + 1, scrln, scroll))
elif state == 13:
(progress, end) = args
bar = '[\033[01;3%im%s\033[00m]'
bar %= (2, 'DONE') if progress == end else (3, '%2.1f' % (progress / end))
print('[%s] (%i/%i) Checking file conflicts for %s' % (bar, scrli, scrln, scroll))
elif state == 14:
(progress, end) = args
bar = '[\033[01;3%im%s\033[00m]'
bar %= (2, 'DONE') if progress == end else (3, '%2.1f' % (progress / end))
print('[%s] (%i/%i) Installing %s' % (bar, scrli, scrln, scroll))
if scrli != scrln:
if state != 12:
print('\033[%iB' % (scrln - (scrli + 1)))
return None
return LibSpike.write(Agg(), scrolls, root, private, explicitness, nodep, force)
def update(self, root = '/', ignores = [], private = False):
'''
Update installed ponies
@param root:str Mounted filesystem to which to perform installation
@param ignores:list<str> Ponies not to update
@param private:bool Whether to update user private packages
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str?, int, [*])→(void|bool|str|int?)
Feed a scroll (`None` only at state 0, 3, 6, 7 and 9) and a state (can be looped) during the process of a scroll.
The states are: 0 - inspecting installed scrolls
1 - proofreading
2 - scroll added because of being updated
3 - resolving conflicts
4 - scroll added because of dependency. Additional parameters: requirers:list<str>
5 - scroll removed due to being replaced. Additional parameters: replacer:str
6 - verify installation. Additional parameters: fresh_installs:list<str>, reinstalls:list<str>, update:list<str>, downgrading:list<str>, skipping:list<str>
Return: accepted:bool
7 - inspecting non-install scrolls for providers
8 - select provider pony. Additional parameters: options:list<str>
Return: select provider:str? `None` if aborted
9 - select when to build ponies which require interaction. Additional parameters: interactive:list<str>, allowed:int
Return: when:excl-flag? `None` if aborted
10 - fetching source. Additional parameters: source:str, progress state:int, progress end:int
11 - verifying source. Additional parameters: progress state:int, progress end:int
12 - compiling
13 - file conflict check: Additional parameters: progress state:int, progress end:int
14 - installing files: Additional parameters: progress state:int, progress end:int
when:excl-flag values: 0 - Build whenever
1 - Build early
2 - Build early and fetch separately
3 - Build late
allowed:int values: The union of all `1 << when` with allowed `when`
'''
def __init__(self):
self.update_add = set()
self.dep_add = {}
self.replace_remove = {}
self.scrls = []
for i in range(6):
self.scrls.append([0, {}])
self.scrls[0] = self.scrls[1]
def __call__(self, scroll, state, *args):
if type(self) == Spike:
if scroll == 'rarity':
scroll = scroll + '♥'
elif scroll.startswith('rarity='):
scroll = scroll.replace('=', '♥=')
if state == 0:
print('Inspecting installed scrolls')
elif state == 1:
print('Proofreading: %s' % scroll)
elif state == 2:
self.update_add.add(scroll[:(scroll + '=').find('=')])
elif state == 3:
print('Resolving conflicts')
elif state == 4:
if scroll in self.dep_add:
self.dep_add[scroll] += args[0]
else:
self.dep_add[scroll] = args[0]
elif state == 5:
if scroll in self.replace_remove:
self.replace_remove[scroll] += args[0]
else:
self.replace_remove[scroll] = args[0]
elif state == 6:
fresh_installs, reinstalls, update, downgrading, skipping = args
for scrl in skipping:
print('Skipping %s' % scrl)
for scrl in fresh_installs:
print('Installing %s' % scrl)
for scrl in reinstalls:
print('Reinstalling %s' % scrl)
for scrl in update:
print('Updating %s' % scrl)
for dep in self.dep_add:
print('Adding %s, required by: %s' % (dep, ', '.join(self.dep_add[dep])))
for replacee in self.replace_remove:
print('Replacing %s with %s' % (replacee, ', '.join(self.replace_remove[replacee])))
for scrl in downgrading:
print('Downgrading %s' % scrl)
while True:
print('\033[01mContinue? (y/n)\033[00m')
answer = input().lower()
if answer.startswith('y') or answer.startswith('n'):
return answer.startswith('y')
elif state == 7:
print('Inspecting scroll repository for providers')
elif state == 8:
print('\033[01mSelect provider for virtual pony: %s\033[00m' % scroll)
i = 0
for prov in args[0]:
i += 1
print('%i: %s' % (i, prov))
print('> ', end='')
sel = input()
try:
sel = int(sel)
if 1 <= sel <= len(args[0]):
return args[0][sel - 1]
except:
pass
return None
elif state == 9:
print('There are some scrolls that require pony interaction to be built:')
for scroll in args[0]:
print(' %s' % scroll)
allowed = args[1]
print('\033[01mWhen do you want to build scrolls that require interaction:\033[00m')
if (allowed & (1 << 0)) != 0:
print(' w - Whenever, I will not leave my precious magic box')
if (allowed & (1 << 1)) != 0:
print(' e - Before all other scrolls')
if (allowed & (1 << 2)) != 0:
print(' E - Before all other scrolls, and download others\' sources afterwards')
if (allowed & (1 << 3)) != 0:
print(' l - After all other scrolls')
print(' a - Abort!')
while True:
when = input()
if (allowed & (1 << 0)) != 0:
if when == 'w' or when == 'W':
return 0
if (allowed & (1 << 1)) != 0:
if when == 'e':
return 1
if (allowed & (1 << 2)) != 0:
if when == 'E':
return 2
if (allowed & (1 << 3)) != 0:
if when == 'l' or when == 'L':
return 3
if when == 'a':
return None
print('\033[01mInvalid option!\033[00m')
else:
if scroll not in self.scrls[state - 10][1]:
self.scrls[state - 10][0] += 1
self.scrls[state - 10][1][scroll] = self.scrls[state - 10][0]
(scrli, scrln) = (self.scrls[state - 10][1][scroll], self.scrls[state - 10][0])
if scrli != scrln:
if state != 12:
print('\033[%iA' % (scrln - scrli))
if state == 10:
(source, progress, end) = args
bar = '[\033[01;3%im%s\033[00m]'
bar %= (2, 'DONE') if progress == end else (3, '%2.1f' % (progress / end))
print('[%s] (%i/%i) Downloading %s: %s' % (bar, scrli, scrln, scroll, source))
elif state == 11:
(progress, end) = args
bar = '[\033[01;3%im%s\033[00m]'
bar %= (2, 'DONE') if progress == end else (3, '%2.1f' % (progress / end))
print('[%s] (%i/%i) Verifying %s' % (bar, scrli, scrln, scroll))
elif state == 12:
print('(%i/%i) Compiling %s' % (scrli + 1, scrln, scroll))
elif state == 13:
(progress, end) = args
bar = '[\033[01;3%im%s\033[00m]'
bar %= (2, 'DONE') if progress == end else (3, '%2.1f' % (progress / end))
print('[%s] (%i/%i) Checking file conflicts for %s' % (bar, scrli, scrln, scroll))
elif state == 14:
(progress, end) = args
bar = '[\033[01;3%im%s\033[00m]'
bar %= (2, 'DONE') if progress == end else (3, '%2.1f' % (progress / end))
print('[%s] (%i/%i) Installing %s' % (bar, scrli, scrln, scroll))
if scrli != scrln:
if state != 12:
print('\033[%iB' % (scrln - (scrli + 1)))
return None
return LibSpike.update(Agg(), root, ignores)
def erase(self, ponies, root = '/', private = False):
'''
Uninstall ponies
@param ponies:list<str> Ponies to uninstall
@param root:str Mounted filesystem from which to perform uninstallation
@param private:bool Whether to uninstall user private ponies rather than user shared ponies
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, int, int)→void
Feed a scroll, removal progress state and removal progress end state, continuously during the progress,
this begins by feeding the state 0 when a scroll is cleared for removal, when all is enqueued the removal begins.
'''
def __init__(self):
self.scrolls = {}
self.next = 0
self.pos = 0
def __call__(self, scroll, progress, end):
if type(self) == Spike:
if scroll == 'rarity':
scroll = scroll + '😢'
elif scroll.startswith('rarity='):
scroll = scroll.replace('=', '😢=')
if scroll not in self.scrolls:
self.scrolls[scroll] = self.next
self.next += 1
p = self.scrolls[scroll]
if p > self.pos:
print('\033[%iB' % (p - self.pos))
elif p < self.pos:
print('\033[%iA' % (self.pos - p))
s = '\033[01;3%im%s'
if progress == 0:
s %= (3, 'WAIT')
elif progress == end:
s %= (2, 'DONE')
else:
s %= (1, '%2.1f' % (progress * 100 / end))
print('[%s\033[00m] %s\n' % (s, scroll))
self.pos = p + 1
return LibSpike.erase(Agg(), ponies, root, private)
def ride(self, pony, private = False):
'''
Execute pony after best effort
@param pony:str The pony to execute
@param private:bool Whether the pony is user private rather than user shared
@return :byte Exit value, see description of `mane`
'''
return LibSpike.ride(pony, private)
def read_files(self, ponies):
'''
List files installed for ponies
@param ponies:list<str> Installed ponies for which to list claimed files
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, str?, [bool])→void
Feed the pony and the file when a file is detected,
but `None` as the file if the pony is not installed.
If `None` is not passed, an additional argument is
passed: False normally, and True if the file is
recursively claimed at detection time.
'''
def __init__(self):
pass
def __call__(self, owner, filename, entire = False):
if filename is None:
printerr('%s is not installed' % owner)
elif entire:
print('%s:recursive: %s' % (owner, filename))
else:
print('%s: %s' % (owner, filename))
return LibSpike.read_files(Agg(), ponies)
def read_info(self, scrolls, field = None, installed = True, notinstalled = True):
'''
List information about scrolls
@param scrolls:list<str> Scrolls for which to list information
@param field:str?|list<str> Information field or fields to fetch, `None` for everything
@param installed:bool Whether to include installed scrolls
@param notinstalled:bool Whether to include not installed scrolls
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, str?, str?, bool)→void
Feed the scroll, the field name and the information in the field when a scroll's information is read,
all (desired) fields for a scroll will come once, in an uninterrupted sequence. Additionally it is
fed whether or not the information concerns an installed or not installed scroll. The values for a
field is returned in an uninterrupted sequence, first the non-installed scroll, then the installed
scroll. If a scroll is not found the field name and the value is returned as `None`. If the field
name is not defined, the value is returned as `None`.
'''
def __init__(self):
self.metaerr = set()
def __call__(self, scroll, meta, info, isinstalled):
if meta is None:
printerr('Scroll %s was not found' % scroll)
elif info is None:
if meta not in self.metaerr:
printerr('Field %s is not defined' % meta)
self.metaerr.add(meta)
else:
if installed == notinstalled:
print('%s: %s: %s: %s' % (scroll, meta, 'installed' if isinstalled else 'not installed', info))
else:
print('%s: %s: %s' % (scroll, meta, info))
return LibSpike.read_info(Agg(), scrolls, field, installed, notinstalled)
def claim(self, files, pony, recursiveness = 0, private = False, force = False):
'''
Claim one or more files as a part of a pony
@param files:list<str> File to claim
@param pony:str The pony
@param recursiveness:int 0 for not recursive, 1 for recursive, 2 for recursive at detection
@param private:bool Whether the pony is user private rather the user shared
@param force:bool Whether to override current file claim
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, str)→void
Feed a file and its owner when a file is already claimed
'''
def __init__(self):
pass
def __call__(self, filename, owner):
print('%s is already claimed by %s' % (filename, owner))
return LibSpike.claim(Agg(), files, pony, recursiveness, private, force)
def disclaim(self, files, pony, recursive = False, private = False):
'''
Disclaim one or more files as a part of a pony
@param files:list<str> File to disclaim
@param pony:str The pony
@param recursive:bool Whether to disclaim directories recursively
@param private:bool Whether the pony is user private rather the user shared
@return :byte Exit value, see description of `mane`
'''
return LibSpike.disclaim(files, pony, recursive, private)
def archive(self, archive, scrolls = False):
'''
Archive the current system installation state
@param archive:str The archive file to create
@param scrolls:bool Whether to only store scroll states and not installed files
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, int, int, int, int)→void
Feed a scroll, scroll index, scroll count, scroll progress state and scroll progress end, continuously during the process
'''
def __init__(self):
self.scrolls = {}
self.next = 0
self.pos = 0
def __call__(self, scroll, scrolli, scrolln, progress, end):
if scroll not in self.scrolls:
self.scrolls[scroll] = self.next
self.next += 1
p = self.scrolls[scroll]
if p > self.pos:
print('\033[%iB' % (p - self.pos))
elif p < self.pos:
print('\033[%iA' % (self.pos - p))
s = '\033[01;3%im%s'
if progress == 0:
s %= (3, 'WAIT')
elif progress == end:
s %= (2, 'DONE')
else:
s %= (1, '%2.1f' % (progress * 100 / end))
print('[%s\033[00m] (%i/%i) %s\n' % (s, scrolli, scrolln, scroll))
self.pos = p + 1
return LibSpike.archive(Agg(), archive, scrolls)
def rollback(self, archive, keep = False, skip = False, gradeness = 0):
'''
Roll back to an archived state
@param archive:str Archive to roll back to
@param keep:bool Keep non-archived installed ponies rather than uninstall them
@param skip:bool Skip rollback of non-installed archived ponies
@param gradeness:int -1 for downgrades only, 1 for upgrades only, 0 for rollback regardless of version
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, int, int, int, int)→void
Feed a scroll, scroll index, scroll count, scroll progress state and scroll progress end, continuously during the process
'''
def __init__(self):
self.scrolls = {}
self.next = 0
self.pos = 0
def __call__(self, scroll, scrolli, scrolln, progress, end):
if scroll not in self.scrolls:
self.scrolls[scroll] = self.next
self.next += 1
p = self.scrolls[scroll]
if p > self.pos:
print('\033[%iB' % (p - self.pos))
elif p < self.pos:
print('\033[%iA' % (self.pos - p))
s = '\033[01;3%im%s'
if progress == 0:
s %= (3, 'WAIT')
elif progress == end:
s %= (2, 'DONE')
else:
s %= (1, '%2.1f' % (progress * 100 / end))
print('[%s\033[00m] (%i/%i) %s\n' % (s, scrolli, scrolln, scroll))
self.pos = p + 1
return LibSpike.rollback(Agg(), archive, keep, skip, gradeness)
def proofread(self, scrolls):
'''
Look for errors in scrolls
@param scrolls:list<str> Scrolls to proofread
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, int, [*])→void
Feed a scroll, 0, scroll index:int, scroll count:int when a scroll proofreading begins
Feed a scroll, 1, error message:str when an error is found
'''
def __init__(self):
pass
def __call__(self, scroll, err, *args):
if err == 0:
index = args[0]
count = args[1]
print('(%i/%i) %s' % (index, count, scroll))
else:
message = args[0]
print('Error: %s: %s' % (scroll, message))
return LibSpike.proofread(Agg(), scrolls)
def clean(self, private = False):
'''
Remove unneeded ponies that are installed as dependencies
@param private:bool Whether to uninstall user private ponies rather than user shared ponies
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, int, int)→void
Feed a scroll, removal progress state and removal progress end state, continuously during the progress,
this begins by feeding the state 0 when a scroll is enqueued, when all is enqueued the removal begins.
'''
def __init__(self):
self.scrolls = {}
self.next = 0
self.pos = 0
def __call__(self, scroll, progress, end):
if scroll not in self.scrolls:
self.scrolls[scroll] = self.next
self.next += 1
p = self.scrolls[scroll]
if p > self.pos:
print('\033[%iB' % (p - self.pos))
elif p < self.pos:
print('\033[%iA' % (self.pos - p))
s = '\033[01;3%im%s'
if progress == 0:
s %= (3, 'WAIT')
elif progress == end:
s %= (2, 'DONE')
else:
s %= (1, '%2.1f' % (progress * 100 / end))
print('[%s\033[00m] %s\n' % (s, scroll))
self.pos = p + 1
return LibSpike.clean(Agg(), private)
def example_shot(self, scrolls, viewer, all_at_once = False):
'''
Display example shots for scrolls
@param scrolls:list<str> Scrolls of which to display example shots
@param viewer:str The PNG viewer to use
@param all_at_once:bool Whether to display all images in a single process instance
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, str?)→void
Feed a scroll and its example shot file when found, or the scroll and `None` if there is no example shot.
'''
def __init__(self):
self.queue = [viewer]
def __call__(self, scroll, shot):
if shot is None:
print('%s has no example shot' % scroll)
elif all_at_once:
self.queue.append(shot)
else:
print(scroll)
Popen([viewer, shot]).communicate()
def done(self):
if all_at_once:
Popen(self.queue).communicate()
agg = Agg()
exit_value = LibSpike.example_shot(agg, scrolls)
if exit_value == 0:
agg.done()
return exit_value
def interactive(self):
'''
Start interactive mode with terminal graphics
@return :byte Exit value, see description of `mane`
'''
if not sys.stdout.isatty():
printerr(self.execprog + ': trying to start interactive mode from a pipe')
return 15
# TODO interactive mode
return 0
def sha3sum(self, files):
'''
Calculate the SHA3 checksum for files to be used in scrolls
@param files:list<str> Files for which to calculate the checksum
@return :byte Exit value, see description of `mane`
'''
class Agg:
'''
aggregator:(str, str?)→void
Feed a file and its checksum when one has been calculated.
`None` is returned as the checksum if it is not a regular file or does not exist.
'''
def __init__(self):
pass
def __call__(self, filename, checksum):
if checksum is None:
if not os.path.exists(filename):
printerr('%s does not exist.' % filename)
else:
printerr('%s is not a regular file.' % filename)
else:
print('\033[01m%s\033[21m %s' % (checksum, filename))
return LibSpike.sha3sum(Agg(), files)
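# True on a plain Linux/Hurd console, where the usage text substitutes colour for underline.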
tty = ('TERM' in os.environ) and (os.environ['TERM'] in ('linux', 'hurd'))
if __name__ == '__main__': # sic (`applebloom main`)
spike = Spike()
spike.mane(sys.argv)
| GNU-Pony/spike | src/spike.py | Python | gpl-3.0 | 70,096 |
#!/usr/bin/env python
'''
ctypesgencore.parser.ctypesparser contains a class, CtypesParser, which is a
subclass of ctypesgencore.parser.cparser.CParser. CtypesParser overrides the
handle_declaration() method of CParser. It turns the low-level type declarations
produced by CParser into CtypesType instances and breaks the parser's general
declarations into function, variable, typedef, constant, and type descriptions.
'''
__docformat__ = 'restructuredtext'
__all__ = ["CtypesParser"]
from cparser import *
from ctypesgencore.ctypedescs import *
from cdeclarations import *
from ctypesgencore.expressions import *
def get_ctypes_type(typ, declarator, check_qualifiers=False):
signed = True
typename = 'int'
longs = 0
t = None
for specifier in typ.specifiers:
if isinstance(specifier, StructTypeSpecifier):
t = make_struct_from_specifier(specifier)
elif isinstance(specifier, EnumSpecifier):
t = make_enum_from_specifier(specifier)
elif specifier == 'signed':
signed = True
elif specifier == 'unsigned':
signed = False
elif specifier == 'long':
longs += 1
else:
typename = str(specifier)
if not t:
# It is a numeric type of some sort
if (typename,signed,longs) in ctypes_type_map:
t = CtypesSimple(typename,signed,longs)
elif signed and not longs:
t = CtypesTypedef(typename)
else:
name = " ".join(typ.specifiers)
if typename in [x[0] for x in ctypes_type_map.keys()]:
# It's an unsupported variant of a builtin type
error = "Ctypes does not support the type \"%s\"." % name
else:
error = "Ctypes does not support adding additional " \
"specifiers to typedefs, such as \"%s\"" % name
t = CtypesTypedef(name)
t.error(error,cls='unsupported-type')
if declarator and declarator.bitfield:
t = CtypesBitfield(t,declarator.bitfield)
qualifiers = []
qualifiers.extend(typ.qualifiers)
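# Walk the declarator chain outward: each pointer level may carry its own parameter
# list (function pointers), array dimensions and qualifiers, so the ctypes type is
# wrapped step by step while following declarator.pointer.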
while declarator and declarator.pointer:
if declarator.parameters is not None:
variadic = "..." in declarator.parameters
params = []
for param in declarator.parameters:
if param=="...":
break
params.append(get_ctypes_type(param.type, param.declarator))
t = CtypesFunction(t, params, variadic)
a = declarator.array
while a:
t = CtypesArray(t, a.size)
a = a.array
qualifiers.extend(declarator.qualifiers)
t = CtypesPointer(t, declarator.qualifiers)
declarator = declarator.pointer
if declarator and declarator.parameters is not None:
variadic = "..." in declarator.parameters
params = []
for param in declarator.parameters:
if param=="...":
break
params.append(get_ctypes_type(param.type, param.declarator))
t = CtypesFunction(t, params, variadic)
if declarator:
a = declarator.array
while a:
t = CtypesArray(t, a.size)
a = a.array
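# A pointer to signed char is exposed as ctypesgen's String convenience type
# rather than a plain pointer to char.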
if isinstance(t, CtypesPointer) and \
isinstance(t.destination, CtypesSimple) and \
t.destination.name=="char" and \
t.destination.signed:
t = CtypesSpecial("String")
return t
def make_struct_from_specifier(specifier):
variety = {True:"union", False:"struct"}[specifier.is_union]
tag = specifier.tag
if specifier.declarations:
members = []
for declaration in specifier.declarations:
t = get_ctypes_type(declaration.type,
declaration.declarator,
check_qualifiers=True)
declarator = declaration.declarator
if declarator is None:
# XXX TEMPORARY while struct with no typedef not filled in
break
while declarator.pointer:
declarator = declarator.pointer
name = declarator.identifier
members.append((name, remove_function_pointer(t)))
else:
members = None
return CtypesStruct(tag,variety,members,
src=(specifier.filename,specifier.lineno))
def make_enum_from_specifier(specifier):
tag = specifier.tag
enumerators = []
last_name = None
for e in specifier.enumerators:
if e.expression:
value = e.expression
else:
if last_name:
value = BinaryExpressionNode("addition", (lambda x,y:x+y),
"(%s + %s)", (False,False),
IdentifierExpressionNode(last_name),
ConstantExpressionNode(1))
else:
value = ConstantExpressionNode(0)
enumerators.append((e.name,value))
last_name = e.name
return CtypesEnum(tag, enumerators,
src=(specifier.filename,specifier.lineno))
class CtypesParser(CParser):
'''Parse a C file for declarations that can be used by ctypes.
Subclass and override the handle_ctypes_* methods.
'''
def handle_declaration(self, declaration, filename, lineno):
t = get_ctypes_type(declaration.type, declaration.declarator)
if type(t) in (CtypesStruct, CtypesEnum):
self.handle_ctypes_new_type(
remove_function_pointer(t), filename, lineno)
declarator = declaration.declarator
if declarator is None:
# XXX TEMPORARY while struct with no typedef not filled in
return
while declarator.pointer:
declarator = declarator.pointer
name = declarator.identifier
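# Dispatch on storage class: typedefs, function prototypes and non-static variables
# are reported through the separate handler hooks that subclasses override.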
if declaration.storage == 'typedef':
self.handle_ctypes_typedef(
name, remove_function_pointer(t), filename, lineno)
elif type(t) == CtypesFunction:
self.handle_ctypes_function(
name, t.restype, t.argtypes, t.variadic, filename, lineno)
elif declaration.storage != 'static':
self.handle_ctypes_variable(name, t, filename, lineno)
# ctypes parser interface. Override these methods in your subclass.
def handle_ctypes_new_type(self, ctype, filename, lineno):
pass
def handle_ctypes_typedef(self, name, ctype, filename, lineno):
pass
def handle_ctypes_function(self, name, restype, argtypes, variadic, filename, lineno):
pass
def handle_ctypes_variable(self, name, ctype, filename, lineno):
pass
| AsherBond/MondocosmOS | grass_trunk/lib/python/ctypes/ctypesgencore/parser/ctypesparser.py | Python | agpl-3.0 | 6,812 |
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Table(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "table"
_valid_props = {
"cells",
"columnorder",
"columnordersrc",
"columnwidth",
"columnwidthsrc",
"customdata",
"customdatasrc",
"domain",
"header",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"ids",
"idssrc",
"meta",
"metasrc",
"name",
"stream",
"type",
"uid",
"uirevision",
"visible",
}
# cells
# -----
@property
def cells(self):
"""
The 'cells' property is an instance of Cells
that may be specified as:
- An instance of :class:`plotly.graph_objs.table.Cells`
- A dict of string/value properties that will be passed
to the Cells constructor
Supported dict properties:
align
Sets the horizontal alignment of the `text`
within the box. Has an effect only if `text`
spans two or more lines (i.e. `text` contains
one or more <br> HTML tags) or if an explicit
width is set to override the text width.
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
fill
:class:`plotly.graph_objects.table.cells.Fill`
instance or dict with compatible properties
font
:class:`plotly.graph_objects.table.cells.Font`
instance or dict with compatible properties
format
Sets the cell value formatting rule using d3
formatting mini-language which is similar to
those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
formatsrc
Sets the source reference on Chart Studio Cloud
for format .
height
The height of cells.
line
:class:`plotly.graph_objects.table.cells.Line`
instance or dict with compatible properties
prefix
Prefix for cell values.
prefixsrc
Sets the source reference on Chart Studio Cloud
for prefix .
suffix
Suffix for cell values.
suffixsrc
Sets the source reference on Chart Studio Cloud
for suffix .
values
Cell values. `values[m][n]` represents the
value of the `n`th point in column `m`,
therefore the `values[m]` vector length for all
columns must be the same (longer vectors will
be truncated). Each value must be a finite
number or a string.
valuessrc
Sets the source reference on Chart Studio Cloud
for values .
Returns
-------
plotly.graph_objs.table.Cells
"""
return self["cells"]
@cells.setter
def cells(self, val):
self["cells"] = val
# columnorder
# -----------
@property
def columnorder(self):
"""
Specifies the rendered order of the data columns; for example,
a value `2` at position `0` means that column index `0` in the
data will be rendered as the third column, as columns have an
index base of zero.
The 'columnorder' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["columnorder"]
@columnorder.setter
def columnorder(self, val):
self["columnorder"] = val
# columnordersrc
# --------------
@property
def columnordersrc(self):
"""
Sets the source reference on Chart Studio Cloud for
columnorder .
The 'columnordersrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["columnordersrc"]
@columnordersrc.setter
def columnordersrc(self, val):
self["columnordersrc"] = val
# columnwidth
# -----------
@property
def columnwidth(self):
"""
The width of columns expressed as a ratio. Columns fill the
        available width in proportion to their specified column widths.
The 'columnwidth' property is a number and may be specified as:
- An int or float
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["columnwidth"]
@columnwidth.setter
def columnwidth(self, val):
self["columnwidth"] = val
# columnwidthsrc
# --------------
@property
def columnwidthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
columnwidth .
The 'columnwidthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["columnwidthsrc"]
@columnwidthsrc.setter
def columnwidthsrc(self, val):
self["columnwidthsrc"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the marker
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.table.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this table trace .
row
If there is a layout grid, use the domain for
this row in the grid for this table trace .
x
Sets the horizontal domain of this table trace
(in plot fraction).
y
Sets the vertical domain of this table trace
(in plot fraction).
Returns
-------
plotly.graph_objs.table.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
# header
# ------
@property
def header(self):
"""
The 'header' property is an instance of Header
that may be specified as:
- An instance of :class:`plotly.graph_objs.table.Header`
- A dict of string/value properties that will be passed
to the Header constructor
Supported dict properties:
align
Sets the horizontal alignment of the `text`
within the box. Has an effect only if `text`
spans two or more lines (i.e. `text` contains
one or more <br> HTML tags) or if an explicit
width is set to override the text width.
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
fill
:class:`plotly.graph_objects.table.header.Fill`
instance or dict with compatible properties
font
:class:`plotly.graph_objects.table.header.Font`
instance or dict with compatible properties
format
Sets the cell value formatting rule using d3
formatting mini-language which is similar to
those of Python. See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
formatsrc
Sets the source reference on Chart Studio Cloud
for format .
height
The height of cells.
line
:class:`plotly.graph_objects.table.header.Line`
instance or dict with compatible properties
prefix
Prefix for cell values.
prefixsrc
Sets the source reference on Chart Studio Cloud
for prefix .
suffix
Suffix for cell values.
suffixsrc
Sets the source reference on Chart Studio Cloud
for suffix .
values
Header cell values. `values[m][n]` represents
the value of the `n`th point in column `m`,
therefore the `values[m]` vector length for all
columns must be the same (longer vectors will
be truncated). Each value must be a finite
number or a string.
valuessrc
Sets the source reference on Chart Studio Cloud
for values .
Returns
-------
plotly.graph_objs.table.Header
"""
return self["header"]
@header.setter
def header(self, val):
self["header"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.table.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
                    content within the hover label box. Has an effect
                    only if the hover label text spans two or more
                    lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.table.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids provide object
        constancy of data points during animation. Should be an array
        of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.table.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.table.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use it to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
cells
:class:`plotly.graph_objects.table.Cells` instance or
dict with compatible properties
columnorder
Specifies the rendered order of the data columns; for
example, a value `2` at position `0` means that column
index `0` in the data will be rendered as the third
column, as columns have an index base of zero.
columnordersrc
Sets the source reference on Chart Studio Cloud for
columnorder .
columnwidth
The width of columns expressed as a ratio. Columns fill
            the available width in proportion to their specified
column widths.
columnwidthsrc
Sets the source reference on Chart Studio Cloud for
columnwidth .
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the marker DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
domain
:class:`plotly.graph_objects.table.Domain` instance or
dict with compatible properties
header
:class:`plotly.graph_objects.table.Header` instance or
dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.table.Hoverlabel` instance
or dict with compatible properties
ids
            Assigns id labels to each datum. These ids provide object
            constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
stream
:class:`plotly.graph_objects.table.Stream` instance or
dict with compatible properties
uid
            Assign an id to this trace. Use it to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
"""
def __init__(
self,
arg=None,
cells=None,
columnorder=None,
columnordersrc=None,
columnwidth=None,
columnwidthsrc=None,
customdata=None,
customdatasrc=None,
domain=None,
header=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
ids=None,
idssrc=None,
meta=None,
metasrc=None,
name=None,
stream=None,
uid=None,
uirevision=None,
visible=None,
**kwargs
):
"""
Construct a new Table object
Table view for detailed data viewing. The data are arranged in
a grid of rows and columns. Most styling can be specified for
        columns, rows or individual cells. The table uses column-
        major order, i.e. the grid is represented as a vector of column
vectors.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Table`
cells
:class:`plotly.graph_objects.table.Cells` instance or
dict with compatible properties
columnorder
Specifies the rendered order of the data columns; for
example, a value `2` at position `0` means that column
index `0` in the data will be rendered as the third
column, as columns have an index base of zero.
columnordersrc
Sets the source reference on Chart Studio Cloud for
columnorder .
columnwidth
The width of columns expressed as a ratio. Columns fill
            the available width in proportion to their specified
column widths.
columnwidthsrc
Sets the source reference on Chart Studio Cloud for
columnwidth .
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the marker DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
domain
:class:`plotly.graph_objects.table.Domain` instance or
dict with compatible properties
header
:class:`plotly.graph_objects.table.Header` instance or
dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.table.Hoverlabel` instance
or dict with compatible properties
ids
            Assigns id labels to each datum. These ids provide object
            constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
stream
:class:`plotly.graph_objects.table.Stream` instance or
dict with compatible properties
uid
            Assign an id to this trace. Use it to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Table
"""
super(Table, self).__init__("table")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Table
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Table`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("cells", None)
_v = cells if cells is not None else _v
if _v is not None:
self["cells"] = _v
_v = arg.pop("columnorder", None)
_v = columnorder if columnorder is not None else _v
if _v is not None:
self["columnorder"] = _v
_v = arg.pop("columnordersrc", None)
_v = columnordersrc if columnordersrc is not None else _v
if _v is not None:
self["columnordersrc"] = _v
_v = arg.pop("columnwidth", None)
_v = columnwidth if columnwidth is not None else _v
if _v is not None:
self["columnwidth"] = _v
_v = arg.pop("columnwidthsrc", None)
_v = columnwidthsrc if columnwidthsrc is not None else _v
if _v is not None:
self["columnwidthsrc"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("header", None)
_v = header if header is not None else _v
if _v is not None:
self["header"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "table"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
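

# --- Illustrative usage sketch (editor's addition, not part of the module) ---
# A minimal example of building a table trace with the class defined above,
# using column-major `values` as described in the Cells/Header docstrings.
# Guarded so it only runs when this file is executed directly.
if __name__ == "__main__":
    example = Table(
        header=dict(values=["name", "score"]),
        cells=dict(values=[["alice", "bob"], [81, 94]]),
        columnwidth=[0.7, 0.3],
    )
    # to_plotly_json() is inherited from the plotly base trace type and
    # returns the plain dict representation of the trace.
    print(example.to_plotly_json())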
| plotly/python-api | packages/python/plotly/plotly/graph_objs/_table.py | Python | mit | 36,304 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Removes unneeded nodes from a GraphDef file.
This script is designed to help streamline models, by taking the input and
output nodes that will be used by an application and figuring out the smallest
set of operations that are required to run for those arguments. The resulting
minimal graph is then saved out.
The advantages of running this script are:
- You may be able to shrink the file size.
- Operations that are unsupported on your platform but still present can be
safely removed.
The resulting graph may not be as flexible as the original though, since any
input nodes that weren't explicitly mentioned may not be accessible any more.
An example of command-line usage is:
bazel build tensorflow/python/tools:strip_unused && \
bazel-bin/tensorflow/python/tools/strip_unused \
--input_graph=some_graph_def.pb \
--output_graph=/tmp/stripped_graph.pb \
--input_node_names=input0 \
--output_node_names=softmax
You can also look at strip_unused_test.py for an example of how to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import app
from tensorflow.python.tools import strip_unused_lib
FLAGS = None
def main(unused_args):
strip_unused_lib.strip_unused_from_files(FLAGS.input_graph,
FLAGS.input_binary,
FLAGS.output_graph,
FLAGS.output_binary,
FLAGS.input_node_names,
FLAGS.output_node_names,
FLAGS.placeholder_type_enum)
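

# --- Illustrative sketch (editor's addition, not part of the original file) --
# The same stripping can be driven from Python rather than the command line by
# calling strip_unused_lib directly, with the argument order used in main()
# above. The file names below are placeholders.
def _example_strip_unused():
  strip_unused_lib.strip_unused_from_files(
      '/tmp/some_graph_def.pb',          # input_graph
      False,                             # input_binary (text GraphDef)
      '/tmp/stripped_graph.pb',          # output_graph
      True,                              # output_binary
      'input0',                          # input_node_names
      'softmax',                         # output_node_names
      dtypes.float32.as_datatype_enum)   # placeholder_type_enum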
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--input_graph',
type=str,
default='',
help='TensorFlow \'GraphDef\' file to load.')
parser.add_argument(
'--input_binary',
nargs='?',
const=True,
type='bool',
default=False,
help='Whether the input files are in binary format.')
parser.add_argument(
'--output_graph',
type=str,
default='',
help='Output \'GraphDef\' file name.')
parser.add_argument(
'--output_binary',
nargs='?',
const=True,
type='bool',
default=True,
help='Whether to write a binary format graph.')
parser.add_argument(
'--input_node_names',
type=str,
default='',
help='The name of the input nodes, comma separated.')
parser.add_argument(
'--output_node_names',
type=str,
default='',
help='The name of the output nodes, comma separated.')
parser.add_argument(
'--placeholder_type_enum',
type=int,
default=dtypes.float32.as_datatype_enum,
help='The AttrValue enum to use for placeholders.')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/python/tools/strip_unused.py | Python | bsd-2-clause | 3,786 |
##################################################################
# Code for testing the variational Multi-Stage Generative Model. #
##################################################################
from __future__ import print_function, division
# basic python
import cPickle as pickle
from PIL import Image
import numpy as np
import numpy.random as npr
from collections import OrderedDict
import time
# theano business
import theano
import theano.tensor as T
# blocks stuff
from blocks.initialization import Constant, IsotropicGaussian, Orthogonal
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import PARAMETER
from blocks.model import Model
from blocks.bricks import Tanh, Identity, Rectifier
from blocks.bricks.cost import BinaryCrossEntropy
from blocks.bricks.recurrent import SimpleRecurrent, LSTM
# phil's sweetness
import utils
from BlocksModels import *
from RAMBlocks import *
from DKCode import get_adam_updates, get_adadelta_updates
from load_data import load_udm, load_tfd, load_svhn_gray, load_binarized_mnist
from HelperFuncs import construct_masked_data, shift_and_scale_into_01, \
row_shuffle, to_fX, one_hot_np
RESULT_PATH = "RAM_TEST_RESULTS/"
def test_seq_cond_gen_bouncing_balls(step_type='add'):
##############################
# File tag, for output stuff #
##############################
result_tag = "{}DKDK_SCG".format(RESULT_PATH)
##########################
# Get some training data #
##########################
rng = np.random.RandomState(1234)
total_steps = 10
obs_dim = 28*28
#data = np.load('/data/lisatmp2/kruegerd/bouncing_balls/bouncing_ball.npy')
data = npr.rand(25000, total_steps, obs_dim).astype(theano.config.floatX)
data = data[:,:total_steps,:]
Xtr = data[:15000]
Xva = data[15000:20000]
Xte = data[20000:]
tr_samples = Xtr.shape[0]
va_samples = Xva.shape[0]
te_samples = Xte.shape[0]
def dimshuffle_batch(Xb):
Xb_fit_for_scan = np.swapaxes(Xb, 0, 1)
return Xb_fit_for_scan
batch_size = 200
############################################################
# Setup some parameters for the Iterative Refinement Model #
############################################################
#total_steps = 10
init_steps = 3
exit_rate = 0.2
x_dim = obs_dim
y_dim = obs_dim
z_dim = 100
rnn_dim = 300
write_dim = 300
mlp_dim = 300
def visualize_attention(result, pre_tag="AAA", post_tag="AAA"):
seq_len = result[0].shape[0]
samp_count = result[0].shape[1]
# get generated predictions
x_samps = np.zeros((seq_len*samp_count, x_dim))
idx = 0
for s1 in range(samp_count):
for s2 in range(seq_len):
x_samps[idx] = result[0][s2,s1,:]
idx += 1
file_name = "{0:s}_traj_xs_{1:s}.png".format(pre_tag, post_tag)
utils.visualize_samples(x_samps, file_name, num_rows=20)
# get sequential attention maps
seq_samps = np.zeros((seq_len*samp_count, x_dim))
idx = 0
for s1 in range(samp_count):
for s2 in range(seq_len):
seq_samps[idx] = result[1][s2,s1,:x_dim] + result[1][s2,s1,x_dim:]
idx += 1
file_name = "{0:s}_traj_att_maps_{1:s}.png".format(pre_tag, post_tag)
utils.visualize_samples(seq_samps, file_name, num_rows=20)
# get sequential attention maps (read out values)
seq_samps = np.zeros((seq_len*samp_count, x_dim))
idx = 0
for s1 in range(samp_count):
for s2 in range(seq_len):
seq_samps[idx] = result[2][s2,s1,:x_dim] + result[2][s2,s1,x_dim:]
idx += 1
file_name = "{0:s}_traj_read_outs_{1:s}.png".format(pre_tag, post_tag)
utils.visualize_samples(seq_samps, file_name, num_rows=20)
return
rnninits = {
'weights_init': IsotropicGaussian(0.01),
'biases_init': Constant(0.),
}
inits = {
'weights_init': IsotropicGaussian(0.01),
'biases_init': Constant(0.),
}
read_N = 2 # inner/outer grid dimension for reader
reader_mlp = SimpleAttentionReader2d(x_dim=x_dim, con_dim=rnn_dim,
width=28, height=28, N=read_N,
init_scale=2.0, **inits)
read_dim = reader_mlp.read_dim # total number of "pixels" read by reader
writer_mlp = MLP([None, None], [rnn_dim, write_dim, y_dim], \
name="writer_mlp", **inits)
# mlps for processing inputs to LSTMs
con_mlp_in = MLP([Identity()], [ z_dim, 4*rnn_dim], \
name="con_mlp_in", **inits)
var_mlp_in = MLP([Identity()], [(y_dim + read_dim + rnn_dim), 4*rnn_dim], \
name="var_mlp_in", **inits)
gen_mlp_in = MLP([Identity()], [ (read_dim + rnn_dim), 4*rnn_dim], \
name="gen_mlp_in", **inits)
# mlps for turning LSTM outputs into conditionals over z_gen
con_mlp_out = CondNet([Rectifier(), Rectifier()], \
[rnn_dim, mlp_dim, mlp_dim, z_dim], \
name="con_mlp_out", **inits)
gen_mlp_out = CondNet([], [rnn_dim, z_dim], name="gen_mlp_out", **inits)
var_mlp_out = CondNet([], [rnn_dim, z_dim], name="var_mlp_out", **inits)
# LSTMs for the actual LSTMs (obviously, perhaps)
con_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="con_rnn", **rnninits)
gen_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="gen_rnn", **rnninits)
var_rnn = BiasedLSTM(dim=rnn_dim, ig_bias=2.0, fg_bias=2.0, \
name="var_rnn", **rnninits)
SeqCondGen_doc_str = \
"""
SeqCondGen -- constructs conditional densities under time constraints.
This model sequentially constructs a conditional density estimate by taking
repeated glimpses at the input x, and constructing a hypothesis about the
output y. The objective is maximum likelihood for (x,y) pairs drawn from
some training set. We learn a proper generative model, using variational
inference -- which can be interpreted as a sort of guided policy search.
The input pairs (x, y) can be either "static" or "sequential". In the
static case, the same x and y are used at every step of the hypothesis
construction loop. In the sequential case, x and y can change at each step
of the loop.
Parameters:
x_and_y_are_seqs: boolean telling whether the conditioning information
and prediction targets are sequential.
total_steps: total number of steps in sequential estimation process
init_steps: number of steps prior to first NLL measurement
exit_rate: probability of exiting following each non "init" step
**^^ THIS IS SET TO 0 WHEN USING SEQUENTIAL INPUT ^^**
nll_weight: weight for the prediction NLL term at each step.
**^^ THIS IS IGNORED WHEN USING STATIC INPUT ^^**
step_type: whether to use "additive" steps or "jump" steps
-- jump steps predict directly from the controller LSTM's
"hidden" state (a.k.a. its memory cells).
x_dim: dimension of inputs on which to condition
y_dim: dimension of outputs to predict
reader_mlp: used for reading from the input
writer_mlp: used for writing to the output prediction
con_mlp_in: preprocesses input to the "controller" LSTM
con_rnn: the "controller" LSTM
con_mlp_out: CondNet for distribution over z given con_rnn
gen_mlp_in: preprocesses input to the "generator" LSTM
gen_rnn: the "generator" LSTM
gen_mlp_out: CondNet for distribution over z given gen_rnn
var_mlp_in: preprocesses input to the "variational" LSTM
var_rnn: the "variational" LSTM
var_mlp_out: CondNet for distribution over z given gen_rnn
"""
SCG = SeqCondGen(
x_and_y_are_seqs=True,
total_steps=total_steps,
init_steps=init_steps,
exit_rate=exit_rate,
nll_weight=0.2, # weight of NLL term at each step
step_type=step_type,
x_dim=x_dim,
y_dim=y_dim,
reader_mlp=reader_mlp,
writer_mlp=writer_mlp,
con_mlp_in=con_mlp_in,
con_mlp_out=con_mlp_out,
con_rnn=con_rnn,
gen_mlp_in=gen_mlp_in,
gen_mlp_out=gen_mlp_out,
gen_rnn=gen_rnn,
var_mlp_in=var_mlp_in,
var_mlp_out=var_mlp_out,
var_rnn=var_rnn
)
SCG.initialize()
compile_start_time = time.time()
# build the attention trajectory sampler
SCG.build_attention_funcs()
# quick test of attention trajectory sampler
samp_count = 100
Xb = dimshuffle_batch(Xva[:samp_count,:])
result = SCG.sample_attention(Xb, Xb)
visualize_attention(result, pre_tag=result_tag, post_tag="b0")
# build the main model functions (i.e. training and cost functions)
SCG.build_model_funcs()
compile_end_time = time.time()
compile_minutes = (compile_end_time - compile_start_time) / 60.0
print("THEANO COMPILE TIME (MIN): {}".format(compile_minutes))
#SCG.load_model_params(f_name="SCG_params.pkl")
################################################################
# Apply some updates, to check that they aren't totally broken #
################################################################
print("Beginning to train the model...")
out_file = open("{}_results.txt".format(result_tag), 'wb')
out_file.flush()
costs = [0. for i in range(10)]
learn_rate = 0.0001
momentum = 0.8
batch_idx = np.arange(batch_size) + tr_samples
for i in range(250000):
scale = min(1.0, ((i+1) / 2500.0))
if (((i + 1) % 10000) == 0):
learn_rate = learn_rate * 0.95
if (i > 10000):
momentum = 0.95
else:
momentum = 0.8
# get the indices of training samples for this batch update
batch_idx += batch_size
if (np.max(batch_idx) >= tr_samples):
# we finished an "epoch", so we rejumble the training set
Xtr = row_shuffle(Xtr)
batch_idx = np.arange(batch_size)
# set sgd and objective function hyperparams for this update
SCG.set_sgd_params(lr=learn_rate, mom_1=momentum, mom_2=0.99)
SCG.set_lam_kld(lam_kld_q2p=0.95, lam_kld_p2q=0.05, lam_kld_p2g=0.05)
# perform a minibatch update and record the cost for this batch
Xb = dimshuffle_batch( Xtr.take(batch_idx, axis=0) )
result = SCG.train_joint(Xb, Xb)
costs = [(costs[j] + result[j]) for j in range(len(result))]
# output diagnostic information and checkpoint parameters, etc.
if ((i % 250) == 0):
costs = [(v / 250.0) for v in costs]
str1 = "-- batch {0:d} --".format(i)
str2 = " total_cost: {0:.4f}".format(costs[0])
str3 = " nll_bound : {0:.4f}".format(costs[1])
str4 = " nll_term : {0:.4f}".format(costs[2])
str5 = " kld_q2p : {0:.4f}".format(costs[3])
str6 = " kld_p2q : {0:.4f}".format(costs[4])
str7 = " kld_p2g : {0:.4f}".format(costs[5])
str8 = " reg_term : {0:.4f}".format(costs[6])
joint_str = "\n".join([str1, str2, str3, str4, str5, str6, str7, str8])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
costs = [0.0 for v in costs]
if ((i % 500) == 0): #((i % 1000) == 0):
SCG.save_model_params("{}_params.pkl".format(result_tag))
# compute a small-sample estimate of NLL bound on validation set
Xva = row_shuffle(Xva)
Xb = dimshuffle_batch( Xva[:1000] )
va_costs = SCG.compute_nll_bound(Xb, Xb)
str1 = " va_nll_bound : {}".format(va_costs[1])
str2 = " va_nll_term : {}".format(va_costs[2])
str3 = " va_kld_q2p : {}".format(va_costs[3])
joint_str = "\n".join([str1, str2, str3])
print(joint_str)
out_file.write(joint_str+"\n")
out_file.flush()
###########################################
# Sample and draw attention trajectories. #
###########################################
samp_count = 100
Xb = dimshuffle_batch( Xva[:samp_count] )
result = SCG.sample_attention(Xb, Xb)
post_tag = "b{0:d}".format(i)
visualize_attention(result, pre_tag=result_tag, post_tag=post_tag)
if __name__=="__main__":
test_seq_cond_gen_bouncing_balls(step_type='add')
| capybaralet/Sequential-Generation | TestKruegerCode.py | Python | mit | 13,097 |
import pytest
import six
from mock import Mock
from thefuck import conf
from tests.utils import Rule
@pytest.mark.parametrize('enabled, rules, result', [
(True, conf.DEFAULT_RULES, True),
(False, conf.DEFAULT_RULES, False),
(False, conf.DEFAULT_RULES + ['test'], True)])
def test_default(enabled, rules, result):
assert (Rule('test', enabled_by_default=enabled) in rules) == result
@pytest.fixture
def load_source(mocker):
return mocker.patch('thefuck.conf.load_source')
@pytest.fixture
def environ(monkeypatch):
data = {}
monkeypatch.setattr('thefuck.conf.os.environ', data)
return data
@pytest.mark.usefixtures('environ')
def test_settings_defaults(load_source):
load_source.return_value = object()
for key, val in conf.DEFAULT_SETTINGS.items():
assert getattr(conf.get_settings(Mock()), key) == val
@pytest.mark.usefixtures('environ')
class TestSettingsFromFile(object):
def test_from_file(self, load_source):
load_source.return_value = Mock(rules=['test'],
wait_command=10,
require_confirmation=True,
no_colors=True,
priority={'vim': 100})
settings = conf.get_settings(Mock())
assert settings.rules == ['test']
assert settings.wait_command == 10
assert settings.require_confirmation is True
assert settings.no_colors is True
assert settings.priority == {'vim': 100}
def test_from_file_with_DEFAULT(self, load_source):
load_source.return_value = Mock(rules=conf.DEFAULT_RULES + ['test'],
wait_command=10,
require_confirmation=True,
no_colors=True)
settings = conf.get_settings(Mock())
assert settings.rules == conf.DEFAULT_RULES + ['test']
@pytest.mark.usefixtures('load_source')
class TestSettingsFromEnv(object):
def test_from_env(self, environ):
environ.update({'THEFUCK_RULES': 'bash:lisp',
'THEFUCK_WAIT_COMMAND': '55',
'THEFUCK_REQUIRE_CONFIRMATION': 'true',
'THEFUCK_NO_COLORS': 'false',
'THEFUCK_PRIORITY': 'bash=10:lisp=wrong:vim=15'})
settings = conf.get_settings(Mock())
assert settings.rules == ['bash', 'lisp']
assert settings.wait_command == 55
assert settings.require_confirmation is True
assert settings.no_colors is False
assert settings.priority == {'bash': 10, 'vim': 15}
def test_from_env_with_DEFAULT(self, environ):
environ.update({'THEFUCK_RULES': 'DEFAULT_RULES:bash:lisp'})
settings = conf.get_settings(Mock())
assert settings.rules == conf.DEFAULT_RULES + ['bash', 'lisp']
class TestInitializeSettingsFile(object):
def test_ignore_if_exists(self):
settings_path_mock = Mock(is_file=Mock(return_value=True), open=Mock())
user_dir_mock = Mock(joinpath=Mock(return_value=settings_path_mock))
conf.initialize_settings_file(user_dir_mock)
assert settings_path_mock.is_file.call_count == 1
assert not settings_path_mock.open.called
def test_create_if_doesnt_exists(self):
settings_file = six.StringIO()
settings_path_mock = Mock(
is_file=Mock(return_value=False),
open=Mock(return_value=Mock(
__exit__=lambda *args: None, __enter__=lambda *args: settings_file)))
user_dir_mock = Mock(joinpath=Mock(return_value=settings_path_mock))
conf.initialize_settings_file(user_dir_mock)
settings_file_contents = settings_file.getvalue()
assert settings_path_mock.is_file.call_count == 1
assert settings_path_mock.open.call_count == 1
assert conf.SETTINGS_HEADER in settings_file_contents
for setting in conf.DEFAULT_SETTINGS.items():
assert '# {} = {}\n'.format(*setting) in settings_file_contents
settings_file.close()
| bigplus/thefuck | tests/test_conf.py | Python | mit | 4,127 |
from setuptools import setup, find_packages
from os import path
import io
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with io.open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='modsec_tools',
version='0.2',
description='Scripts to analyse logfiles and generate rule files for mod_security2.',
long_description=long_description,
url='https://github.com/zathras777/modsec_tools',
author='david reid',
author_email='[email protected]',
license='Unlicense',
packages = find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
keywords='apache security logfiles rules',
entry_points={
'console_scripts': ['analyse_audit=modsec_tools.analyse_audit:main',
'extract_rules=modsec_tools.extract_rules:main']
},
)
| zathras777/modsec_tools | setup.py | Python | unlicense | 1,026 |
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from haystack import connections, reset_search_queries
from haystack.utils.loading import UnifiedIndex
from ..core.models import MockModel
from .test_solr_backend import clear_solr_index, SolrMockModelSearchIndex
@override_settings(DEBUG=True)
class SearchModelAdminTestCase(TestCase):
fixtures = ['base_data.json', 'bulk_data.json']
def setUp(self):
super(SearchModelAdminTestCase, self).setUp()
# With the models setup, you get the proper bits.
# Stow.
self.old_ui = connections['solr'].get_unified_index()
self.ui = UnifiedIndex()
smmsi = SolrMockModelSearchIndex()
self.ui.build(indexes=[smmsi])
connections['solr']._index = self.ui
# Wipe it clean.
clear_solr_index()
# Force indexing of the content.
smmsi.update(using='solr')
superuser = User.objects.create_superuser(
username='superuser',
password='password',
email='[email protected]',
)
def tearDown(self):
# Restore.
connections['solr']._index = self.old_ui
super(SearchModelAdminTestCase, self).tearDown()
def test_usage(self):
reset_search_queries()
self.assertEqual(len(connections['solr'].queries), 0)
self.assertEqual(self.client.login(username='superuser', password='password'), True)
# First, non-search behavior.
resp = self.client.get('/admin/core/mockmodel/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(connections['solr'].queries), 0)
self.assertEqual(resp.context['cl'].full_result_count, 23)
# Then search behavior.
resp = self.client.get('/admin/core/mockmodel/', data={'q': 'Haystack'})
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(connections['solr'].queries), 3)
self.assertEqual(resp.context['cl'].full_result_count, 23)
# Ensure they aren't search results.
self.assertEqual(isinstance(resp.context['cl'].result_list[0], MockModel), True)
result_pks = [i.pk for i in resp.context['cl'].result_list]
self.assertIn(5, result_pks)
# Make sure only changelist is affected.
resp = self.client.get(reverse('admin:core_mockmodel_change', args=(1, )))
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.context['original'].id, 1)
self.assertTemplateUsed(resp, 'admin/change_form.html')
# The Solr query count should be unchanged:
self.assertEqual(len(connections['solr'].queries), 3)
| celerityweb/django-haystack | test_haystack/solr_tests/test_admin.py | Python | bsd-3-clause | 2,888 |
import boto3
from datetime import datetime, timedelta
ec2 = boto3.client('ec2')
account_ids = ['12345']
def lambda_handler(event, context):
days = 7
delete_time = datetime.strftime(datetime.utcnow() - timedelta(days=days),'%Y-%m-%dT%H:%M:%S.000Z')
print 'Delete Snapshots Run Before %s' % delete_time
print 'Deleting any snapshots older than {days} days'.format(days=days)
#Pagination Size, Max is 1000
MaxPageResults = 1000
MinuteInSeconds = 60
    UnitConversionMultiplier = 1000
    # Maximum execution time of a Lambda function is 5 minutes.
    # We want our Lambda function to stop once roughly 4.5 minutes
    # of execution time have been used.
    MaxExecutionTime = 5 * MinuteInSeconds * UnitConversionMultiplier
    OurCodeExecutionTime = 4.5 * MinuteInSeconds * UnitConversionMultiplier
snapshot_response = ec2.describe_snapshots(OwnerIds=account_ids, MaxResults=MaxPageResults)
#Infinite Loop
while True :
deletion_counter = 0
size_counter = 0
        for snapshot in snapshot_response['Snapshots']:
            # Reset the flag for every snapshot: each snapshot has its own
            # tags, so a DoNotDelete tag on one snapshot must not prevent
            # deletion of the snapshots that follow it.
            delete = True
            if 'Tags' in snapshot:
for tag in snapshot['Tags']:
if (tag['Key'] == 'DoNotDelete') and (tag['Value'].lower() == 'yes'):
delete = False
start_time = datetime.strftime(snapshot['StartTime'],'%Y-%m-%dT%H:%M:%S.000Z')
if "ami-" not in snapshot['Description']:
if (start_time < delete_time) and (delete == True ):
print 'Deleting {id}'.format(id=snapshot['SnapshotId'])
deletion_counter = deletion_counter + 1
size_counter = size_counter + snapshot['VolumeSize']
try:
ec2.delete_snapshot(SnapshotId=snapshot['SnapshotId'])
except Exception, e:
print e
        # Currently this prints the deletion count and total size for every
        # page of snapshots. If a single summary for the whole run is wanted
        # instead, move this print (and the deletion_counter / size_counter
        # initialisation) above the while loop.
print 'Deleted {number} snapshots totalling {size} GB'.format(
number=deletion_counter,
size=size_counter
)
        # Break out and end the run once roughly 4.5 minutes of execution time
        # (OurCodeExecutionTime) have been used, i.e. when the remaining time
        # drops to MaxExecutionTime - OurCodeExecutionTime or less.
        if context.get_remaining_time_in_millis() <= (MaxExecutionTime - OurCodeExecutionTime):
print 'Our Execution time completed'
break
        # Break the loop and end the script if there is no next page of snapshots
if 'NextToken' not in snapshot_response or snapshot_response['NextToken'] is None or snapshot_response['NextToken'] == '':
print 'No next page found'
break
#Fetch snapshot descriptions for next token
snapshot_response = ec2.describe_snapshots(
OwnerIds = account_ids,
MaxResults = MaxPageResults,
NextToken = snapshot_response['NextToken']
)
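

# --- Illustrative local-test sketch (editor's addition, commented out) -------
# lambda_handler() only uses context.get_remaining_time_in_millis(), so a
# minimal stand-in object is enough to exercise the handler outside Lambda.
# Left commented out because running it would issue real DeleteSnapshot calls
# against the configured AWS account.
#
# class FakeContext(object):
#     def get_remaining_time_in_millis(self):
#         return 5 * 60 * 1000  # pretend the full five minutes remain
#
# if __name__ == '__main__':
#     lambda_handler({}, FakeContext())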
| thigley986/Lambda-AWS-EC2-Snapshot-Management | LambdaEBSDeleteSnapshots.py | Python | apache-2.0 | 3,519 |