repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
datapythonista/pandas | pandas/tests/scalar/timestamp/test_unary_ops.py | 3 | 18077 |
from datetime import datetime
from dateutil.tz import gettz
import numpy as np
import pytest
import pytz
from pytz import utc
from pandas._libs.tslibs import (
NaT,
Timedelta,
Timestamp,
conversion,
to_offset,
)
from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
import pandas.util._test_decorators as td
import pandas._testing as tm
class TestTimestampUnaryOps:
# --------------------------------------------------------------
# Timestamp.round
@pytest.mark.parametrize(
"timestamp, freq, expected",
[
("20130101 09:10:11", "D", "20130101"),
("20130101 19:10:11", "D", "20130102"),
("20130201 12:00:00", "D", "20130202"),
("20130104 12:00:00", "D", "20130105"),
("2000-01-05 05:09:15.13", "D", "2000-01-05 00:00:00"),
("2000-01-05 05:09:15.13", "H", "2000-01-05 05:00:00"),
("2000-01-05 05:09:15.13", "S", "2000-01-05 05:09:15"),
],
)
def test_round_frequencies(self, timestamp, freq, expected):
dt = Timestamp(timestamp)
result = dt.round(freq)
expected = Timestamp(expected)
assert result == expected
def test_round_tzaware(self):
dt = Timestamp("20130101 09:10:11", tz="US/Eastern")
result = dt.round("D")
expected = Timestamp("20130101", tz="US/Eastern")
assert result == expected
dt = Timestamp("20130101 09:10:11", tz="US/Eastern")
result = dt.round("s")
assert result == dt
def test_round_30min(self):
# round
dt = Timestamp("20130104 12:32:00")
result = dt.round("30Min")
expected = Timestamp("20130104 12:30:00")
assert result == expected
def test_round_subsecond(self):
# GH#14440 & GH#15578
result = Timestamp("2016-10-17 12:00:00.0015").round("ms")
expected = Timestamp("2016-10-17 12:00:00.002000")
assert result == expected
result = Timestamp("2016-10-17 12:00:00.00149").round("ms")
expected = Timestamp("2016-10-17 12:00:00.001000")
assert result == expected
ts = Timestamp("2016-10-17 12:00:00.0015")
for freq in ["us", "ns"]:
assert ts == ts.round(freq)
result = Timestamp("2016-10-17 12:00:00.001501031").round("10ns")
expected = Timestamp("2016-10-17 12:00:00.001501030")
assert result == expected
def test_round_nonstandard_freq(self):
with tm.assert_produces_warning(False):
Timestamp("2016-10-17 12:00:00.001501031").round("1010ns")
def test_round_invalid_arg(self):
stamp = Timestamp("2000-01-05 05:09:15.13")
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
stamp.round("foo")
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
("2117-01-01 00:00:45", "floor", "15s", "2117-01-01 00:00:45"),
("2117-01-01 00:00:45", "ceil", "15s", "2117-01-01 00:00:45"),
(
"2117-01-01 00:00:45.000000012",
"floor",
"10ns",
"2117-01-01 00:00:45.000000010",
),
(
"1823-01-01 00:00:01.000000012",
"ceil",
"10ns",
"1823-01-01 00:00:01.000000020",
),
("1823-01-01 00:00:01", "floor", "1s", "1823-01-01 00:00:01"),
("1823-01-01 00:00:01", "ceil", "1s", "1823-01-01 00:00:01"),
("NaT", "floor", "1s", "NaT"),
("NaT", "ceil", "1s", "NaT"),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = Timestamp(test_input)
func = getattr(dt, rounder)
result = func(freq)
if dt is NaT:
assert result is NaT
else:
expected = Timestamp(expected)
assert result == expected
@pytest.mark.parametrize(
"test_input, freq, expected",
[
("2018-01-01 00:02:06", "2s", "2018-01-01 00:02:06"),
("2018-01-01 00:02:00", "2T", "2018-01-01 00:02:00"),
("2018-01-01 00:04:00", "4T", "2018-01-01 00:04:00"),
("2018-01-01 00:15:00", "15T", "2018-01-01 00:15:00"),
("2018-01-01 00:20:00", "20T", "2018-01-01 00:20:00"),
("2018-01-01 03:00:00", "3H", "2018-01-01 03:00:00"),
],
)
@pytest.mark.parametrize("rounder", ["ceil", "floor", "round"])
def test_round_minute_freq(self, test_input, freq, expected, rounder):
# Ensure timestamps that shouldn't round don't!
# GH#21262
dt = Timestamp(test_input)
expected = Timestamp(expected)
func = getattr(dt, rounder)
result = func(freq)
assert result == expected
def test_ceil(self):
dt = Timestamp("20130101 09:10:11")
result = dt.ceil("D")
expected = Timestamp("20130102")
assert result == expected
def test_floor(self):
dt = Timestamp("20130101 09:10:11")
result = dt.floor("D")
expected = Timestamp("20130101")
assert result == expected
@pytest.mark.parametrize("method", ["ceil", "round", "floor"])
def test_round_dst_border_ambiguous(self, method):
# GH 18946 round near "fall back" DST
ts = Timestamp("2017-10-29 00:00:00", tz="UTC").tz_convert("Europe/Madrid")
#
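# 02:00 local time occurs twice in Europe/Madrid on this date (DST fall-back); the
# 'ambiguous' argument selects which occurrence the rounded value maps to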
result = getattr(ts, method)("H", ambiguous=True)
assert result == ts
result = getattr(ts, method)("H", ambiguous=False)
expected = Timestamp("2017-10-29 01:00:00", tz="UTC").tz_convert(
"Europe/Madrid"
)
assert result == expected
result = getattr(ts, method)("H", ambiguous="NaT")
assert result is NaT
msg = "Cannot infer dst time"
with pytest.raises(pytz.AmbiguousTimeError, match=msg):
getattr(ts, method)("H", ambiguous="raise")
@pytest.mark.parametrize(
"method, ts_str, freq",
[
["ceil", "2018-03-11 01:59:00-0600", "5min"],
["round", "2018-03-11 01:59:00-0600", "5min"],
["floor", "2018-03-11 03:01:00-0500", "2H"],
],
)
def test_round_dst_border_nonexistent(self, method, ts_str, freq):
# GH 23324 round near "spring forward" DST
ts = Timestamp(ts_str, tz="America/Chicago")
result = getattr(ts, method)(freq, nonexistent="shift_forward")
expected = Timestamp("2018-03-11 03:00:00", tz="America/Chicago")
assert result == expected
result = getattr(ts, method)(freq, nonexistent="NaT")
assert result is NaT
msg = "2018-03-11 02:00:00"
with pytest.raises(pytz.NonExistentTimeError, match=msg):
getattr(ts, method)(freq, nonexistent="raise")
@pytest.mark.parametrize(
"timestamp",
[
"2018-01-01 0:0:0.124999360",
"2018-01-01 0:0:0.125000367",
"2018-01-01 0:0:0.125500",
"2018-01-01 0:0:0.126500",
"2018-01-01 12:00:00",
"2019-01-01 12:00:00",
],
)
@pytest.mark.parametrize(
"freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"1D",
],
)
def test_round_int64(self, timestamp, freq):
# check that all rounding modes are accurate to int64 precision
# see GH#22591
dt = Timestamp(timestamp)
unit = to_offset(freq).nanos
# test floor
result = dt.floor(freq)
assert result.value % unit == 0, f"floor not a {freq} multiple"
assert 0 <= dt.value - result.value < unit, "floor error"
# test ceil
result = dt.ceil(freq)
assert result.value % unit == 0, f"ceil not a {freq} multiple"
assert 0 <= result.value - dt.value < unit, "ceil error"
# test round
result = dt.round(freq)
assert result.value % unit == 0, f"round not a {freq} multiple"
assert abs(result.value - dt.value) <= unit // 2, "round error"
if unit % 2 == 0 and abs(result.value - dt.value) == unit // 2:
# round half to even
assert result.value // unit % 2 == 0, "round half to even error"
def test_round_implementation_bounds(self):
# See also: analogous test for Timedelta
result = Timestamp.min.ceil("s")
expected = Timestamp(1677, 9, 21, 0, 12, 44)
assert result == expected
result = Timestamp.max.floor("s")
expected = Timestamp.max - Timedelta(854775807)
assert result == expected
with pytest.raises(OverflowError, match="value too large"):
Timestamp.min.floor("s")
# the second message here shows up in windows builds
msg = "|".join(
["Python int too large to convert to C long", "int too big to convert"]
)
with pytest.raises(OverflowError, match=msg):
Timestamp.max.ceil("s")
@pytest.mark.parametrize("n", range(100))
@pytest.mark.parametrize(
"method", [Timestamp.round, Timestamp.floor, Timestamp.ceil]
)
def test_round_sanity(self, method, n):
iinfo = np.iinfo(np.int64)
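# draw a random epoch-nanosecond value spanning almost the full int64 range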
val = np.random.randint(iinfo.min + 1, iinfo.max, dtype=np.int64)
ts = Timestamp(val)
def checker(res, ts, nanos):
if method is Timestamp.round:
diff = np.abs((res - ts).value)
assert diff <= nanos / 2
elif method is Timestamp.floor:
assert res <= ts
elif method is Timestamp.ceil:
assert res >= ts
assert method(ts, "ns") == ts
res = method(ts, "us")
nanos = 1000
assert np.abs((res - ts).value) < nanos
assert res.value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "ms")
nanos = 1_000_000
assert np.abs((res - ts).value) < nanos
assert res.value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "s")
nanos = 1_000_000_000
assert np.abs((res - ts).value) < nanos
assert res.value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "min")
nanos = 60 * 1_000_000_000
assert np.abs((res - ts).value) < nanos
assert res.value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "h")
nanos = 60 * 60 * 1_000_000_000
assert np.abs((res - ts).value) < nanos
assert res.value % nanos == 0
checker(res, ts, nanos)
res = method(ts, "D")
nanos = 24 * 60 * 60 * 1_000_000_000
assert np.abs((res - ts).value) < nanos
assert res.value % nanos == 0
checker(res, ts, nanos)
# --------------------------------------------------------------
# Timestamp.replace
def test_replace_naive(self):
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00")
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00")
assert result == expected
def test_replace_aware(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
# replacing datetime components with and w/o presence of a timezone
ts = Timestamp("2016-01-01 09:00:00", tz=tz)
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00", tz=tz)
assert result == expected
def test_replace_preserves_nanos(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
result = ts.replace(hour=0)
expected = Timestamp("2016-01-01 00:00:00.000000123", tz=tz)
assert result == expected
def test_replace_multiple(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
# replacing datetime components with and w/o presence of a timezone
# test all
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
result = ts.replace(
year=2015,
month=2,
day=2,
hour=0,
minute=5,
second=5,
microsecond=5,
nanosecond=5,
)
expected = Timestamp("2015-02-02 00:05:05.000005005", tz=tz)
assert result == expected
def test_replace_invalid_kwarg(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
msg = r"replace\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
ts.replace(foo=5)
def test_replace_integer_args(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH#14621, GH#7825
ts = Timestamp("2016-01-01 09:00:00.000000123", tz=tz)
msg = "value must be an integer, received <class 'float'> for hour"
with pytest.raises(ValueError, match=msg):
ts.replace(hour=0.1)
def test_replace_tzinfo_equiv_tz_localize_none(self):
# GH#14621, GH#7825
# assert conversion to naive is the same as replacing tzinfo with None
ts = Timestamp("2013-11-03 01:59:59.999999-0400", tz="US/Eastern")
assert ts.tz_localize(None) == ts.replace(tzinfo=None)
@td.skip_if_windows
def test_replace_tzinfo(self):
# GH#15683
dt = datetime(2016, 3, 27, 1)
tzinfo = pytz.timezone("CET").localize(dt, is_dst=False).tzinfo
result_dt = dt.replace(tzinfo=tzinfo)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo)
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None)
result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None)
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
assert result_dt.timestamp() == result_pd.timestamp()
assert result_dt == result_pd
assert result_dt == result_pd.to_pydatetime()
@pytest.mark.parametrize(
"tz, normalize",
[
(pytz.timezone("US/Eastern"), lambda x: x.tzinfo.normalize(x)),
(gettz("US/Eastern"), lambda x: x),
],
)
def test_replace_across_dst(self, tz, normalize):
# GH#18319 check that 1) timezone is correctly normalized and
# 2) that hour is not incorrectly changed by this normalization
ts_naive = Timestamp("2017-12-03 16:03:30")
ts_aware = conversion.localize_pydatetime(ts_naive, tz)
# Preliminary sanity-check
assert ts_aware == normalize(ts_aware)
# Replace across DST boundary
ts2 = ts_aware.replace(month=6)
# Check that `replace` preserves hour literal
assert (ts2.hour, ts2.minute) == (ts_aware.hour, ts_aware.minute)
# Check that post-replace object is appropriately normalized
ts2b = normalize(ts2)
assert ts2 == ts2b
def test_replace_dst_border(self):
# GH 7825
t = Timestamp("2013-11-3", tz="America/Chicago")
result = t.replace(hour=3)
expected = Timestamp("2013-11-3 03:00:00", tz="America/Chicago")
assert result == expected
@pytest.mark.parametrize("fold", [0, 1])
@pytest.mark.parametrize("tz", ["dateutil/Europe/London", "Europe/London"])
def test_replace_dst_fold(self, fold, tz):
# GH 25017
d = datetime(2019, 10, 27, 2, 30)
ts = Timestamp(d, tz=tz)
result = ts.replace(hour=1, fold=fold)
expected = Timestamp(datetime(2019, 10, 27, 1, 30)).tz_localize(
tz, ambiguous=not fold
)
assert result == expected
# --------------------------------------------------------------
# Timestamp.normalize
@pytest.mark.parametrize("arg", ["2013-11-30", "2013-11-30 12:00:00"])
def test_normalize(self, tz_naive_fixture, arg):
tz = tz_naive_fixture
ts = Timestamp(arg, tz=tz)
result = ts.normalize()
expected = Timestamp("2013-11-30", tz=tz)
assert result == expected
def test_normalize_pre_epoch_dates(self):
# GH: 36294
result = Timestamp("1969-01-01 09:00:00").normalize()
expected = Timestamp("1969-01-01 00:00:00")
assert result == expected
# --------------------------------------------------------------
@td.skip_if_windows
def test_timestamp(self):
# GH#17329
# tz-naive --> treat it as if it were UTC for purposes of timestamp()
ts = Timestamp.now()
uts = ts.replace(tzinfo=utc)
assert ts.timestamp() == uts.timestamp()
tsc = Timestamp("2014-10-11 11:00:01.12345678", tz="US/Central")
utsc = tsc.tz_convert("UTC")
# utsc is a different representation of the same time
assert tsc.timestamp() == utsc.timestamp()
# datetime.timestamp() converts in the local timezone
with tm.set_timezone("UTC"):
# should agree with datetime.timestamp method
dt = ts.to_pydatetime()
assert dt.timestamp() == ts.timestamp()
@pytest.mark.parametrize("fold", [0, 1])
def test_replace_preserves_fold(fold):
# GH 37610. Check that replace preserves Timestamp fold property
tz = gettz("Europe/Moscow")
ts = Timestamp(year=2009, month=10, day=25, hour=2, minute=30, fold=fold, tzinfo=tz)
ts_replaced = ts.replace(second=1)
assert ts_replaced.fold == fold
| bsd-3-clause | -8,175,025,607,885,940,000 | 33.763462 | 88 | 0.551308 | false |
mkieszek/odoo | addons/purchase/purchase.py | 1 | 104421 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import pytz
from openerp import SUPERUSER_ID, workflow
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record_list, browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
from openerp.exceptions import UserError
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj = self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
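# val accumulates the tax amount and val1 the untaxed subtotal, both in the order currency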
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in line.taxes_id.compute_all(line.price_unit, cur, line.product_qty, product=line.product_id, partner=order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
pol_obj = self.pool.get('purchase.order.line')
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
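# only push the new date to lines that currently define the minimum or are planned earlier than the new value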
pol_ids = pol_obj.search(cr, uid, [
('order_id', '=', po.id), '|', ('date_planned', '=', po.minimum_planned_date), ('date_planned', '<', value)
], context=context)
pol_obj.write(cr, uid, pol_ids, {'date_planned': value}, context=context)
self.invalidate_cache(cr, uid, context=context)
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.state == 'cancel':
continue
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
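# res[order_id] = [received (done) quantity, total non-cancelled quantity], filled from the aggregated stock moves below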
cr.execute('''SELECT
p.order_id, sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
purchase_order_line p on (p.id=m.purchase_line_id)
WHERE
p.order_id IN %s GROUP BY m.state, p.order_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_purchase_order(self, cr, uid, ids, context=None):
result = {}
for order in self.browse(cr, uid, ids, context=context):
result[order.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line if line.state != 'cancel')
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
type_obj = self.pool.get('stock.picking.type')
user_obj = self.pool.get('res.users')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)], context=context)
if not types:
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id', '=', False)], context=context)
if not types:
raise UserError(_("Make sure you have at least one incoming picking type defined"))
return types[0]
def _get_picking_ids(self, cr, uid, ids, field_names, args, context=None):
res = {}
for po_id in ids:
res[po_id] = []
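# a single query collects, for every order, the distinct pickings whose moves originate from that order's lines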
query = """
SELECT picking_id, po.id FROM stock_picking p, stock_move m, purchase_order_line pol, purchase_order po
WHERE po.id in %s and po.id = pol.order_id and pol.id = m.purchase_line_id and m.picking_id = p.id
GROUP BY picking_id, po.id
"""
cr.execute(query, (tuple(ids), ))
picks = cr.fetchall()
for pick_id, po_id in picks:
res[po_id].append(pick_id)
return res
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
return {
purchase.id: {
'shipment_count': len(purchase.picking_ids),
'invoice_count': len(purchase.invoice_ids),
}
for purchase in self.browse(cr, uid, ids, context=context)
}
STATE_SELECTION = [
('draft', 'Draft RFQ'),
('sent', 'RFQ Sent'),
('bid', 'Bid Received'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Confirmed'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
READONLY_STATES = {
'confirmed': [('readonly', True)],
'approved': [('readonly', True)],
'done': [('readonly', True)]
}
_columns = {
'name': fields.char('Order Reference', required=True, select=True, copy=False,
help="Unique number of the purchase order, "
"computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', copy=False,
help="Reference of the document that generated this purchase order "
"request; a sales order or an internal procurement request."),
'partner_ref': fields.char('Vendor Reference', states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=False,
help="Reference of the sales order or bid sent by your vendor. "
"It's mainly used to do the matching when you receive the "
"products as this reference is usually written on the "
"delivery order sent by your vendor."),
'date_order':fields.datetime('Order Date', required=True, states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)]},
select=True, help="Depicts the date on which the Quotation should be validated and converted into a Purchase Order; by default it is the creation date.",
copy=False),
'date_approve':fields.date('Date Approved', readonly=1, select=True, copy=False,
help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Vendor', required=True, states=READONLY_STATES,
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states=READONLY_STATES,
help="Put an address if you want to deliver directly from the vendor to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states=READONLY_STATES),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states=READONLY_STATES, help="The pricelist sets the currency used for this purchase order. It also computes the vendor price for the selected products/quantities."),
'currency_id': fields.many2one('res.currency','Currency', required=True, states=READONLY_STATES),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True,
help="The status of the purchase order or the quotation request. "
"A request for quotation is a purchase order in a 'Draft' status. "
"Then the order has to be confirmed by the user, and the status switches "
"to 'Confirmed'. Then the vendor must confirm the order to change "
"the status to 'Approved'. When the purchase order is paid and "
"received, the status becomes 'Done'. If a cancel action occurs in "
"the invoice or in the receipt of goods, the status becomes "
"in exception.",
select=True, copy=False),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines',
states={'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=True),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True, copy=False),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id',
'invoice_id', 'Invoices', copy=False,
help="Invoices generated for a purchase order"),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking List', help="This is the list of receipts that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, copy=False,
help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', copy=False,
help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)],'bid':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control / On Purchase Order lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Based on incoming shipments: let you create an invoice when receipts are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='datetime', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
'purchase.order': (_get_purchase_order, ['order_line'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits=0, string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits=0, string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits=0, string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The total amount"),
'fiscal_position_id': fields.many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'incoterm_id': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
'bid_date': fields.date('Bid Received On', readonly=True, help="Date on which the bid was received"),
'bid_validity': fields.date('Bid Valid Until', help="Date on which the bid expired"),
'picking_type_id': fields.many2one('stock.picking.type', 'Deliver To', help="This will determine picking type of incoming shipment", required=True,
states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)], 'done': [('readonly', True)]}),
'related_location_id': fields.related('picking_type_id', 'default_location_dest_id', type='many2one', relation='stock.location', string="Related location", store=True),
'related_usage': fields.related('location_id', 'usage', type='char'),
'shipment_count': fields.function(_count_all, type='integer', string='Incoming Shipments', multi=True),
'invoice_count': fields.function(_count_all, type='integer', string='Invoices', multi=True),
'group_id': fields.many2one('procurement.group', string="Procurement Group"),
}
_defaults = {
'date_order': fields.datetime.now,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
'currency_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id,
'picking_type_id': _get_picking_in,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name', '/') == '/':
vals['name'] = self.pool.get('ir.sequence').next_by_code(cr, uid, 'purchase.order') or '/'
context = dict(context or {}, mail_create_nolog=True)
order = super(purchase_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [order], body=_("RFQ created"), context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise UserError(_('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
self.signal_workflow(cr, uid, unlink_ids, 'purchase_cancel')
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'state' in init_values and record.state == 'approved':
return 'purchase.mt_rfq_approved'
elif 'state' in init_values and record.state == 'confirmed':
return 'purchase.mt_rfq_confirmed'
elif 'state' in init_values and record.state == 'done':
return 'purchase.mt_rfq_done'
return super(purchase_order, self)._track_subtype(cr, uid, ids, init_values, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
if status in ('draft', 'cancel'):
order_line_ids += [po_line.id for po_line in order.order_line]
else: # Do not change the status of already cancelled lines
order_line_ids += [po_line.id for po_line in order.order_line if po_line.state != 'cancel']
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
#Destination address is used when dropshipping
def onchange_dest_address_id(self, cr, uid, ids, address_id, context=None):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {}
supplier = address.browse(cr, uid, address_id, context=context)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
value = {}
if picking_type_id:
picktype = self.pool.get("stock.picking.type").browse(cr, uid, picking_type_id, context=context)
if picktype.default_location_dest_id:
value.update({'location_id': picktype.default_location_dest_id.id, 'related_usage': picktype.default_location_dest_id.usage})
value.update({'related_location_id': picktype.default_location_dest_id.id})
return {'value': value}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position_id': False,
'payment_term_id': False,
}}
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
fp = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, context=context)
supplier = partner.browse(cr, uid, partner_id, context=context)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position_id': fp or supplier.property_account_position_id and supplier.property_account_position_id.id or False,
'payment_term_id': supplier.property_supplier_payment_term_id.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
action_id = mod_obj.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree2')
result = act_obj.read(cr, uid, action_id, context=context)
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise UserError(_('Please create Invoices.'))
if len(inv_ids) > 1:
result['domain'] = [('id', 'in', inv_ids)]
else:
res = mod_obj.xmlid_to_res_id(cr, uid, 'account.invoice_supplier_form')
result['views'] = [(res, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing invoices of the given purchase order ids. It can either be in a list or in a form view, if there is only one invoice to show.
'''
context = dict(context or {})
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line if line.state != 'cancel']})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Vendor Bills'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing picking orders of the given purchase order ids.
'''
if context is None:
context = {}
mod_obj = self.pool.get('ir.model.data')
dummy, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree'))
action = self.pool.get('ir.actions.act_window').read(cr, uid, action_id, context=context)
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
#override the context to get rid of the default filtering on picking type
action['context'] = {}
#choose the view_mode accordingly
if len(pick_ids) > 1:
action['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
action['views'] = [(res and res[1] or False, 'form')]
action['res_id'] = pick_ids and pick_ids[0] or False
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def wkf_bid_received(self, cr, uid, ids, context=None):
bid_date = fields.date.context_today(self, cr, uid, context=context)
self.message_post(cr, uid, ids, body=_("Bid received on %s") % (bid_date), context=context)
return self.write(cr, uid, ids, {'state':'bid', 'bid_date': bid_date})
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
if not context:
context= {}
ir_model_data = self.pool.get('ir.model.data')
try:
if context.get('send_rfq', False):
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
else:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase_done')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
This function prints the request for quotation and marks it as sent, so that we can more easily see the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'send_rfq')
return self.pool['report'].get_action(cr, uid, ids, 'purchase.report_purchasequotation', context=context)
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not any(line.state != 'cancel' for line in po.order_line):
raise UserError(_('You cannot confirm a purchase order without any purchase order line.'))
if po.invoice_method == 'picking' and not any([l.product_id and l.product_id.type in ('product', 'consu') and l.state != 'cancel' for l in po.order_line]):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm a purchase order with Invoice Control Method 'Based on incoming shipments' that doesn't contain any stockable item."))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
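# expense account: taken from the product, falling back to its category, then mapped through the
# order's fiscal position; with anglo-saxon accounting on stockable products the stock input account is used instead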
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense_id.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ_id.id
if not acc_id:
raise UserError(_('Define an expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ_id', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position_id or False
#For anglo-saxon accounting
account_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)
if po_line.company_id.anglo_saxon_accounting and po_line.product_id and not po_line.product_id.type == 'service':
acc_id = po_line.product_id.property_stock_account_input and po_line.product_id.property_stock_account_input.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_stock_account_input_categ_id and po_line.product_id.categ_id.property_stock_account_input_categ_id.id
if acc_id:
fpos = po_line.order_id.fiscal_position_id or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, acc_id)
return account_id
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects required data from the purchase order line that is used to create the invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_ids': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
'purchase_line_id': order_line.id,
}
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
"""Prepare the dict of values to create the new invoice for a
purchase order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: purchase.order record to invoice
:param list(int) line_ids: list of invoice line IDs that must be
attached to the invoice
:return: dict of value to create() the invoice
"""
journal_ids = self.pool['account.journal'].search(
cr, uid, [('type', '=', 'purchase'),
('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise UserError(_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
return {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': order.partner_id.property_account_payable_id.id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line_ids': [(6, 0, line_ids)],
'origin': order.name,
'fiscal_position_id': order.fiscal_position_id.id or False,
'payment_term_id': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
self.set_order_line_status(cr, uid, ids, 'draft', context=context)
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
self.create_workflow(cr, uid, [p_id])
return True
def wkf_po_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'done'}, context=context)
self.set_order_line_status(cr, uid, ids, 'done', context=context)
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates an invoice for each of the given purchase orders and links it to the corresponding order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
context = dict(context or {})
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
# generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
inv_lines = []
for po_line in order.order_line:
if po_line.state == 'cancel':
continue
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
# get invoice data and create invoice
inv_data = self._prepare_invoice(cr, uid, order, inv_lines, context=context)
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]})
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
context = context or {}
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
if pick.state == 'done':
raise UserError(_('Unable to cancel the purchase order %s.') % (purchase.name) + _('You have already received some goods for it. '))
# Check action_cancel
self.pool.get('stock.picking').action_cancel(cr, uid, [x.id for x in purchase.picking_ids if x.state != 'cancel'], context=context)
# Check procurements not related to stock move yet
if not context.get('cancel_procurement'):
cancel_procurements = []
cancel_moves = []
exception_procurements = []
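# procurements whose rule propagates are cancelled together with their destination moves;
# non-propagating ones are put in exception instead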
for line in purchase.order_line:
if line.procurement_ids:
cancel_procurements += [x.id for x in line.procurement_ids if x.state not in ('cancel', 'exception') and x.rule_id.propagate]
exception_procurements += [x.id for x in line.procurement_ids if x.state not in ('cancel', 'exception') and not x.rule_id.propagate]
cancel_moves += [x.move_dest_id.id for x in line.procurement_ids if x.move_dest_id and x.move_dest_id.state!='cancel' and x.rule_id.propagate]
if cancel_moves:
cancel_moves = list(set(cancel_moves))
self.pool['stock.move'].action_cancel(cr, uid, cancel_moves, context=context)
if cancel_procurements:
cancel_procurements = list(set(cancel_procurements))
self.pool['procurement.order'].write(cr, uid, cancel_procurements, {'state': 'cancel'}, context=context)
if exception_procurements:
exception_procurements = list(set(exception_procurements))
self.pool['procurement.order'].write(cr, uid, exception_procurements, {'state': 'exception'}, context=context)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel', 'draft'):
raise UserError(_("Unable to cancel this purchase order.") + " " + _("You must first cancel all invoices related to this purchase order."))
self.pool.get('account.invoice') \
.signal_workflow(cr, uid, map(attrgetter('id'), purchase.invoice_ids), 'invoice_cancel')
self.signal_workflow(cr, uid, ids, 'purchase_cancel')
return True
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
''' prepare the stock move data from the PO line. This function returns a list of dictionaries ready to be used in stock.move's create()'''
product_uom = self.pool.get('product.uom')
price_unit = order_line.price_unit
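# convert the unit price from the purchase UoM to the product's default (stock) UoM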
if order_line.product_uom.id != order_line.product_id.uom_id.id:
price_unit *= order_line.product_uom.factor / order_line.product_id.uom_id.factor
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
res = []
move_template = {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order.date_order,
'date_expected': order_line.date_planned,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id,
'move_dest_id': False,
'state': 'draft',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit,
'picking_type_id': order.picking_type_id.id,
'group_id': group_id,
'procurement_id': False,
'origin': order.name,
'route_ids': order.picking_type_id.warehouse_id and [(6, 0, [x.id for x in order.picking_type_id.warehouse_id.route_ids])] or [],
'warehouse_id':order.picking_type_id.warehouse_id.id,
'invoice_state': order.invoice_method == 'picking' and '2binvoiced' or 'none',
}
diff_quantity = order_line.product_qty
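# create one stock move per procurement consuming this line, each capped at the quantity still left to dispatch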
for procurement in order_line.procurement_ids:
procurement_qty = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, to_uom_id=order_line.product_uom.id)
tmp = move_template.copy()
tmp.update({
'product_uom_qty': min(procurement_qty, diff_quantity),
'product_uos_qty': min(procurement_qty, diff_quantity),
'move_dest_id': procurement.move_dest_id.id, #move destination is same as procurement destination
'procurement_id': procurement.id,
'invoice_state': procurement.rule_id.invoice_state or (procurement.location_id and procurement.location_id.usage == 'customer' and procurement.invoice_state=='2binvoiced' and '2binvoiced') or (order.invoice_method == 'picking' and '2binvoiced') or 'none', #dropship case takes from sale
'propagate': procurement.rule_id.propagate,
})
diff_quantity -= min(procurement_qty, diff_quantity)
res.append(tmp)
#if the order line has a bigger quantity than the procurement it was for (manually changed or minimal quantity), then
#split the future stock move in two because the route followed may be different.
if float_compare(diff_quantity, 0.0, precision_rounding=order_line.product_uom.rounding) > 0:
move_template['product_uom_qty'] = diff_quantity
move_template['product_uos_qty'] = diff_quantity
res.append(move_template)
return res
def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates the appropriate stock moves for the given order lines, optionally creating a
picking if none is given or no suitable one is found, then confirms the moves, makes them
available, and confirms the pickings.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise a standard
incoming picking will be created to wrap the stock moves (default behavior of the stock.move)
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: None
"""
stock_move = self.pool.get('stock.move')
todo_moves = []
if order.group_id:
new_group = order.group_id.id
else:
new_group = self.pool.get("procurement.group").create(cr, uid, {'name': order.name, 'partner_id': order.partner_id.id}, context=context)
for order_line in order_lines:
if order_line.state == 'cancel':
continue
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
for vals in self._prepare_order_line_move(cr, uid, order, order_line, picking_id, new_group, context=context):
move = stock_move.create(cr, uid, vals, context=context)
todo_moves.append(move)
todo_moves = stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
def test_moves_done(self, cr, uid, ids, context=None):
'''PO is done at the delivery side if all the incoming shipments are done'''
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state != 'done':
return False
return True
def test_moves_except(self, cr, uid, ids, context=None):
''' PO is in exception at the delivery side if one of the pickings is canceled
and the other pickings are completed (done or canceled)
'''
at_least_one_canceled = False
alldoneorcancel = True
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state == 'cancel':
at_least_one_canceled = True
if picking.state not in ['done', 'cancel']:
alldoneorcancel = False
return at_least_one_canceled and alldoneorcancel
def move_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.move_ids]
return res
def action_picking_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids):
picking_vals = {
'picking_type_id': order.picking_type_id.id,
'partner_id': order.partner_id.id,
'date': order.date_order,
'origin': order.name,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
}
picking_id = self.pool.get('stock.picking').create(cr, uid, picking_vals, context=context)
self._create_stock_moves(cr, uid, order, order.order_line, picking_id, context=context)
return picking_id
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
# Do check on related procurements:
proc_obj = self.pool.get("procurement.order")
po_lines = []
for po in self.browse(cr, uid, ids, context=context):
po_lines += [x.id for x in po.order_line if x.state != 'cancel']
if po_lines:
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', po_lines)], context=context)
if procs:
proc_obj.check(cr, uid, procs, context=context)
for id in ids:
self.message_post(cr, uid, id, body=_("Products received"), context=context)
return True
def do_merge(self, cr, uid, ids, context=None):
"""
To merge similar types of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
* Purchase Orders have the same stock location, same pricelist, and same currency
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: dictionary mapping each new purchase order id to the list of ids of the orders merged into it
"""
#TOFIX: merged order lines should be unlinked
def make_key(br, fields):
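# build a hashable key from the given fields, reducing browse records to ids so that
# orders with identical values end up in the same merge group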
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, browse_record_list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
context = dict(context or {})
# Compute what the new orders should contain
new_orders = {}
order_lines_to_move = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id', 'currency_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
order_lines_to_move.setdefault(order_key, [])
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'picking_type_id': porder.picking_type_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'currency_id': porder.currency_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position_id': porder.fiscal_position_id and porder.fiscal_position_id.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
order_lines_to_move[order_key] += [order_line.id for order_line in porder.order_line
if order_line.state != 'cancel']
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(6, 0, order_lines_to_move[order_key])]
# create the new order
context.update({'mail_create_nolog': True})
neworder_id = self.create(cr, uid, order_data)
self.message_post(cr, uid, [neworder_id], body=_("RFQ created"), context=context)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
self.signal_workflow(cr, uid, [old_id], 'purchase_cancel')
return orders_info
def _set_po_lines_invoiced(self, cr, uid, ids, context=None):
for po in self.browse(cr, uid, ids, context=context):
is_invoiced = []
if po.invoice_method == 'picking':
# We determine the invoiced state of the PO line based on the invoiced state
# of the associated moves. This should cover all possible cases:
# - all moves are done and invoiced
# - a PO line is split into multiple moves (e.g. if multiple pickings): some
# pickings are done, some are in progress, some are cancelled
for po_line in po.order_line:
if (po_line.move_ids and
all(move.state in ('done', 'cancel') for move in po_line.move_ids) and
not all(move.state == 'cancel' for move in po_line.move_ids) and
all(move.invoice_state == 'invoiced' for move in po_line.move_ids if move.state == 'done')):
is_invoiced.append(po_line.id)
else:
for po_line in po.order_line:
if (po_line.invoice_lines and
all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines)):
is_invoiced.append(po_line.id)
if is_invoiced:
self.pool['purchase.order.line'].write(cr, uid, is_invoiced, {'invoiced': True})
workflow.trg_write(uid, 'purchase.order', po.id, cr)
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
cur = line.order_id.pricelist_id.currency_id
res[line.id] = line.taxes_id.compute_all(line.price_unit, cur, line.product_qty, product=line.product_id, partner=line.order_id.partner_id)['total_excluded']
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.datetime('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits=0),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')],
'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
\n* The \'Done\' status is set automatically when purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel',
'order_line_id', 'invoice_id', 'Invoice Lines',
readonly=True, copy=False),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'partner_id': fields.related('order_id', 'partner_id', string='Partner', readonly=True, type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id', 'date_order', string='Order Date', readonly=True, type="datetime"),
'procurement_ids': fields.one2many('procurement.order', 'purchase_line_id', string='Associated procurements'),
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def unlink(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.state in ['approved', 'done'] and line.state not in ['draft', 'cancel']:
raise UserError(_('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
procurement_obj = self.pool.get('procurement.order')
procurement_ids_to_except = procurement_obj.search(cr, uid, [('purchase_line_id', 'in', ids)], context=context)
if procurement_ids_to_except:
for po_id in procurement_ids_to_except:
procurement_obj.message_post(cr, uid, po_id, body=_('Purchase order line deleted.'), context=context)
procurement_obj.write(cr, uid, procurement_ids_to_except, {'state': 'exception'}, context=context)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, state=state, replace=False, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order field, as a string in
DEFAULT_SERVER_DATETIME_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(days=supplier_delay)
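    # Minimal usage sketch (assumed values, not from the original module): with
    # supplier_info.delay == 5 and date_order_str == '2016-01-10 00:00:00', the method above
    # returns datetime(2016, 1, 15, 0, 0), i.e. the order date shifted by the vendor delay.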
def action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
# We will group by PO first, so we do the check only once for each PO
purchase_orders = list(set([x.order_id for x in self.browse(cr, uid, ids, context=context)]))
for purchase in purchase_orders:
if all([l.state == 'cancel' for l in purchase.order_line]):
self.pool.get('purchase.order').action_cancel(cr, uid, [purchase.id], context=context)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', replace=True, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
if not uom_id:
uom_id = self.default_get(cr, uid, ['product_uom'], context=context).get('product_uom', False)
res['value']['product_uom'] = uom_id
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise UserError(_('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise UserError(_('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
if replace:
                # call name_get() with the partner in the context to possibly match the name and description in the seller_ids field
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.datetime.now()
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
                if float_compare(min_qty, qty, precision_digits=precision) == 1:  # if the vendor's minimal quantity is greater than the quantity entered by the user, use the minimal quantity
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected vendor has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
price = price_unit
if price_unit is False or price_unit is None:
# - determine price_unit and taxes_id
if pricelist_id:
date_order_str = datetime.strptime(date_order, DEFAULT_SERVER_DATETIME_FORMAT).strftime(DEFAULT_SERVER_DATE_FORMAT)
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order_str})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
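    # Illustrative shape of the dictionary returned by the onchange above (hypothetical values):
    #   {'value': {'name': 'Product description', 'product_uom': 4, 'product_qty': 1.0,
    #              'date_planned': '2016-01-15 00:00:00', 'price_unit': 10.0, 'taxes_id': [...]},
    #    'domain': {'product_uom': [('category_id', '=', 1)]},
    #    'warning': {'title': ..., 'message': ...}}   # only present when a warning was raised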
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
return [('buy', _('Buy'))] + super(procurement_rule, self)._get_action(cr, uid, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line'),
'purchase_id': fields.related('purchase_line_id', 'order_id', type='many2one', relation='purchase.order', string='Purchase Order'),
}
def propagate_cancels(self, cr, uid, ids, context=None):
purchase_line_obj = self.pool.get('purchase.order.line')
lines_to_cancel = []
uom_obj = self.pool.get("product.uom")
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.rule_id.action == 'buy' and procurement.purchase_line_id:
if procurement.purchase_line_id.state not in ('draft', 'cancel'):
raise UserError(
                        _('Cannot cancel this procurement as the related purchase order has already been confirmed. Please cancel the purchase order first.'))
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, cancel=True, context=context)
if new_qty != procurement.purchase_line_id.product_qty:
purchase_line_obj.write(cr, uid, [procurement.purchase_line_id.id], {'product_qty': new_qty, 'price_unit': new_price}, context=context)
if float_compare(new_qty, 0.0, precision_rounding=procurement.product_uom.rounding) != 1:
if procurement.purchase_line_id.id not in lines_to_cancel:
lines_to_cancel += [procurement.purchase_line_id.id]
if lines_to_cancel:
purchase_line_obj.action_cancel(cr, uid, lines_to_cancel, context=context)
purchase_line_obj.unlink(cr, uid, lines_to_cancel, context=context)
return super(procurement_order, self).propagate_cancels(cr, uid, ids, context=context)
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy':
#make a purchase order for the procurement
return self.make_po(cr, uid, [procurement.id], context=context)[procurement.id]
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
#TODO: Autocommit needed?
def run(self, cr, uid, ids, autocommit=False, context=None):
procs = self.browse(cr, uid, ids, context=context)
to_assign = [x for x in procs if x.state not in ('running', 'done')]
self._assign_multi(cr, uid, to_assign, context=context)
buy_ids = [x.id for x in to_assign if x.rule_id and x.rule_id.action == 'buy']
if buy_ids:
result_dict = self.make_po(cr, uid, buy_ids, context=context)
runnings = []
exceptions = []
for proc in result_dict.keys():
if result_dict[proc]:
runnings += [proc]
else:
exceptions += [proc]
if runnings:
self.write(cr, uid, runnings, {'state': 'running'}, context=context)
if exceptions:
self.write(cr, uid, exceptions, {'state': 'exception'}, context=context)
set_others = set(ids) - set(buy_ids)
return super(procurement_order, self).run(cr, uid, list(set_others), context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.purchase_line_id:
if procurement.purchase_line_id.order_id.shipped:
return True
elif procurement.move_ids:
moves = self.pool.get('stock.move').browse(cr, uid, [x.id for x in procurement.move_ids], context=context)
return all(move.state == 'done' for move in moves)
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
def _check_supplier_info(self, cr, uid, ids, context=None):
''' Check the vendor info field of a product and write an error message on the procurement if needed.
Returns True if all needed information is there, False if some configuration mistake is detected.
'''
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
            partner = procurement.product_id.seller_id  # main vendor of the procurement's product
if not procurement.product_id.seller_ids:
message = _('No vendor defined for this product !')
elif not partner:
message = _('No default vendor defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the vendor')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
                    raise UserError(_('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
return True
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:params procurement: the procurement object generating the purchase order
:params dict po_vals: field values for the new purchase order (the
``order_line`` field will be overwritten with one
single line, as passed in ``line_vals``).
:params dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
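    # Usage sketch (hypothetical field values): po_vals and line_vals are plain dictionaries of
    # field values, e.g.
    #   po_vals   = {'partner_id': 7, 'location_id': 12, 'pricelist_id': 1}
    #   line_vals = {'product_id': 42, 'product_qty': 5.0, 'price_unit': 10.0}
    # and the single order line is attached through the (0, 0, line_vals) one2many command.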
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
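    # Example sketch (assumed numbers): with procurement.date_planned == '2016-03-20 12:00:00'
    # and company.po_lead == 2.0 days, the schedule date computed above is 2016-03-18 12:00:00.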
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
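    # Example sketch (assumed numbers): continuing the example above, a schedule date of
    # 2016-03-18 12:00:00 with procurement.product_id.seller_delay == 7 gives an order date
    # of 2016-03-11 12:00:00.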
def _get_product_supplier(self, cr, uid, procurement, context=None):
''' returns the main vendor of the procurement's product given as argument'''
supplierinfo = self.pool['product.supplierinfo']
company_supplier = supplierinfo.search(cr, uid,
[('product_tmpl_id', '=', procurement.product_id.product_tmpl_id.id), ('company_id', '=', procurement.company_id.id)], limit=1, context=context)
if company_supplier:
return supplierinfo.browse(cr, uid, company_supplier[0], context=context).name
return procurement.product_id.seller_id
def _get_po_line_values_from_procs(self, cr, uid, procurements, partner, schedule_date, context=None):
res = {}
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
pricelist_id = partner.property_product_pricelist_purchase.id
prices_qty = []
qty = {}
for procurement in procurements:
seller_qty = procurement.product_id.seller_qty if procurement.location_id.usage != 'customer' else 0.0
uom_id = procurement.product_id.uom_po_id.id
qty[procurement.product_id.id] = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty[procurement.product_id.id] = max(qty[procurement.product_id.id], seller_qty)
prices_qty += [(procurement.product_id, qty[procurement.product_id.id], partner)]
prices = pricelist_obj.price_get_multi(cr, uid, [pricelist_id], prices_qty)
#Passing partner_id to context for purchase order line integrity of Line name
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner.id})
        names = prod_obj.name_get(cr, uid, [x.product_id.id for x in procurements], context=new_context)
names_dict = {}
for id, name in names:
names_dict[id] = name
for procurement in procurements:
taxes_ids = procurement.product_id.supplier_taxes_id
# It is necessary to have the appropriate fiscal position to get the right tax mapping
fp = acc_pos_obj.get_fiscal_position(cr, uid, None, partner.id, context=context)
if fp:
fp = acc_pos_obj.browse(cr, uid, fp, context=context)
taxes = acc_pos_obj.map_tax(cr, uid, fp, taxes_ids)
name = names_dict[procurement.product_id.id]
if procurement.product_id.description_purchase:
name += '\n' + procurement.product_id.description_purchase
price = prices[procurement.product_id.id][pricelist_id]
price = uom_obj._compute_price(cr, uid, procurement.product_uom.id, price, to_uom_id=procurement.product_id.product_tmpl_id.uom_po_id.id)
values = {
'name': name,
'product_qty': qty[procurement.product_id.id],
'product_id': procurement.product_id.id,
'product_uom': procurement.product_id.uom_po_id.id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'taxes_id': [(6, 0, taxes)],
'procurement_ids': [(4, procurement.id)]
}
res[procurement.id] = values
return res
def _calc_new_qty_price(self, cr, uid, procurement, po_line=None, cancel=False, context=None):
if not po_line:
po_line = procurement.purchase_line_id
uom_obj = self.pool.get('product.uom')
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty,
procurement.product_id.uom_po_id.id)
if cancel:
qty = -qty
# Make sure we use the minimum quantity of the partner corresponding to the PO
# This does not apply in case of dropshipping
supplierinfo_min_qty = 0.0
if po_line.order_id.location_id.usage != 'customer':
if po_line.product_id.seller_id.id == po_line.order_id.partner_id.id:
supplierinfo_min_qty = po_line.product_id.seller_qty
else:
supplierinfo_obj = self.pool.get('product.supplierinfo')
supplierinfo_ids = supplierinfo_obj.search(cr, uid, [('name', '=', po_line.order_id.partner_id.id), ('product_tmpl_id', '=', po_line.product_id.product_tmpl_id.id)])
supplierinfo_min_qty = supplierinfo_obj.browse(cr, uid, supplierinfo_ids).min_qty
if supplierinfo_min_qty == 0.0:
qty += po_line.product_qty
else:
# Recompute quantity by adding existing running procurements.
for proc in po_line.procurement_ids:
qty += uom_obj._compute_qty(cr, uid, proc.product_uom.id, proc.product_qty,
proc.product_id.uom_po_id.id) if proc.state == 'running' else 0.0
qty = max(qty, supplierinfo_min_qty) if qty > 0.0 else 0.0
price = po_line.price_unit
if qty != po_line.product_qty:
pricelist_obj = self.pool.get('product.pricelist')
pricelist_id = po_line.order_id.partner_id.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, po_line.order_id.partner_id.id, {'uom': procurement.product_uom.id})[pricelist_id]
return qty, price
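    # Sketch of the intent (assumed figures): cancelling a procurement of 3 units on a line of
    # 10 units, with no vendor minimal quantity, gives qty = -3 + 10 = 7 and the line is kept;
    # a non-positive result makes the caller (propagate_cancels) cancel and unlink the line.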
def _get_grouping_dicts(self, cr, uid, ids, context=None):
"""
        Group the procurements according to the purchase orders they should go into, so that
        lines going to the same PO can be processed at once.
        Returns two dictionaries:
        add_purchase_procs: key: existing draft PO id, value: procurements to add to that PO
        create_purchase_procs: key: grouping tuple describing the PO to create, value: procurements to add to it
"""
po_obj = self.pool.get('purchase.order')
# Regroup POs
cr.execute("""
SELECT psi.name, p.id, pr.id, pr.picking_type_id, p.location_id, p.partner_dest_id, p.company_id, p.group_id,
pr.group_propagation_option, pr.group_id, psi.qty
FROM procurement_order AS p
LEFT JOIN procurement_rule AS pr ON pr.id = p.rule_id
LEFT JOIN procurement_group AS pg ON p.group_id = pg.id,
product_supplierinfo AS psi, product_product AS pp
WHERE
p.product_id = pp.id AND p.id in %s AND psi.product_tmpl_id = pp.product_tmpl_id
AND (psi.company_id = p.company_id or psi.company_id IS NULL)
ORDER BY psi.sequence,
psi.name, p.rule_id, p.location_id, p.company_id, p.partner_dest_id, p.group_id
""", (tuple(ids), ))
res = cr.fetchall()
old = False
# A giant dict for grouping lines, ... to do at once
create_purchase_procs = {} # Lines to add to a newly to create po
add_purchase_procs = {} # Lines to add/adjust in an existing po
        proc_seller = {}  # remember the first matching vendor per procurement so each procurement is handled only once
for partner, proc, rule, pick_type, location, partner_dest, company, group, group_propagation, fixed_group, qty in res:
if not proc_seller.get(proc):
proc_seller[proc] = partner
new = partner, rule, pick_type, location, company, group, group_propagation, fixed_group
if new != old:
old = new
dom = [
('partner_id', '=', partner), ('state', '=', 'draft'), ('picking_type_id', '=', pick_type),
('location_id', '=', location), ('company_id', '=', company), ('dest_address_id', '=', partner_dest)]
if group_propagation == 'propagate':
dom += [('group_id', '=', group)]
elif group_propagation == 'fixed':
dom += [('group_id', '=', fixed_group)]
available_draft_po_ids = po_obj.search(cr, uid, dom, context=context)
available_draft_po = available_draft_po_ids and available_draft_po_ids[0] or False
# Add to dictionary
if available_draft_po:
if add_purchase_procs.get(available_draft_po):
add_purchase_procs[available_draft_po] += [proc]
else:
add_purchase_procs[available_draft_po] = [proc]
else:
if create_purchase_procs.get(new):
create_purchase_procs[new] += [proc]
else:
create_purchase_procs[new] = [proc]
return add_purchase_procs, create_purchase_procs
def make_po(self, cr, uid, ids, context=None):
res = {}
po_obj = self.pool.get('purchase.order')
po_line_obj = self.pool.get('purchase.order.line')
seq_obj = self.pool.get('ir.sequence')
uom_obj = self.pool.get('product.uom')
acc_pos_obj = self.pool.get('account.fiscal.position')
add_purchase_procs, create_purchase_procs = self._get_grouping_dicts(cr, uid, ids, context=context)
procs_done = []
# Let us check existing purchase orders and add/adjust lines on them
for add_purchase in add_purchase_procs.keys():
procs_done += add_purchase_procs[add_purchase]
po = po_obj.browse(cr, uid, add_purchase, context=context)
lines_to_update = {}
line_values = []
procurements = self.browse(cr, uid, add_purchase_procs[add_purchase], context=context)
po_line_ids = po_line_obj.search(cr, uid, [('order_id', '=', add_purchase), ('product_id', 'in', [x.product_id.id for x in procurements])], context=context)
po_lines = po_line_obj.browse(cr, uid, po_line_ids, context=context)
po_prod_dict = {}
for pol in po_lines:
po_prod_dict[pol.product_id.id] = pol
procs_to_create = []
#Check which procurements need a new line and which need to be added to an existing one
for proc in procurements:
if po_prod_dict.get(proc.product_id.id):
po_line = po_prod_dict[proc.product_id.id]
# FIXME: compute quantity using `_calc_new_qty_price` method.
# new_qty, new_price = self._calc_new_qty_price(cr, uid, proc, po_line=po_line, context=context)
uom_id = po_line.product_uom # Convert to UoM of existing line
qty = uom_obj._compute_qty_obj(cr, uid, proc.product_uom, proc.product_qty, uom_id)
if lines_to_update.get(po_line):
lines_to_update[po_line] += [(proc, qty)]
else:
lines_to_update[po_line] = [(proc, qty)]
else:
procs_to_create.append(proc)
procs = []
# FIXME: these are not real tracking values, it should be fixed if tracking values for one2many
# are managed
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, values in tracked_values.iteritems():
message += '<div> • <b>%s</b>: ' % name
message += '%s</div>' % values
return message
            # Update the quantities of the lines that need to be updated
for line in lines_to_update.keys():
tot_qty = 0
for proc, qty in lines_to_update[line]:
tot_qty += qty
self.message_post(cr, uid, proc.id, body=_("Quantity added in existing Purchase Order Line"), context=context)
msg = format_message(_('Quantity added in existing Purchase Order Line'), {'Product': proc.product_id.name, 'Quantity': proc.product_qty, 'Procurement': proc.origin})
po_obj.message_post(cr, uid, [add_purchase], body=msg, context=context)
line_values += [(1, line.id, {'product_qty': line.product_qty + tot_qty, 'procurement_ids': [(4, x[0].id) for x in lines_to_update[line]]})]
# Create lines for which no line exists yet
if procs_to_create:
partner = po.partner_id
schedule_date = datetime.strptime(po.minimum_planned_date, DEFAULT_SERVER_DATETIME_FORMAT)
value_lines = self._get_po_line_values_from_procs(cr, uid, procs_to_create, partner, schedule_date, context=context)
line_values += [(0, 0, value_lines[x]) for x in value_lines.keys()]
for proc in procs_to_create:
self.message_post(cr, uid, [proc.id], body=_("Purchase line created and linked to an existing Purchase Order"), context=context)
msg = format_message(_('Purchase order line added'), {'Product': proc.product_id.name, 'Quantity': proc.product_qty, 'Procurement': proc.origin})
po_obj.message_post(cr, uid, [add_purchase], body=msg, context=context)
po_obj.write(cr, uid, [add_purchase], {'order_line': line_values},context=context)
# Create new purchase orders
partner_obj = self.pool.get("res.partner")
new_pos = []
for create_purchase in create_purchase_procs.keys():
procs_done += create_purchase_procs[create_purchase]
line_values = []
procurements = self.browse(cr, uid, create_purchase_procs[create_purchase], context=context)
partner = partner_obj.browse(cr, uid, create_purchase[0], context=context)
#Create purchase order itself:
procurement = procurements[0]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, procurement.company_id, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, procurement.company_id, schedule_date, context=context)
value_lines = self._get_po_line_values_from_procs(cr, uid, procurements, partner, schedule_date, context=context)
line_values += [(0, 0, value_lines[x]) for x in value_lines.keys()]
name = seq_obj.next_by_code(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
gpo = procurement.rule_id.group_propagation_option
group = (gpo == 'fixed' and procurement.rule_id.group_id.id) or (gpo == 'propagate' and procurement.group_id.id) or False
fp = acc_pos_obj.get_fiscal_position(cr, uid, None, partner.id, context=context)
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': create_purchase[0],
'location_id': procurement.location_id.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'pricelist_id': partner.property_product_pricelist_purchase.id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position_id': fp,
'payment_term_id': partner.property_supplier_payment_term_id.id,
'dest_address_id': procurement.partner_dest_id.id,
'group_id': group,
'order_line': line_values,
}
new_po = po_obj.create(cr, uid, po_vals, context=context)
new_pos.append(new_po)
for proc in create_purchase_procs[create_purchase]:
self.message_post(cr, uid, proc, body=_("Draft Purchase Order created"), context=context)
other_proc_ids = list(set(ids) - set(procs_done))
res = dict.fromkeys(ids, True)
if other_proc_ids:
other_procs = self.browse(cr, uid, other_proc_ids, context=context)
for procurement in other_procs:
res[procurement.id] = False
self.message_post(cr, uid, [procurement.id], _('There is no vendor associated to product %s') % (procurement.product_id.name))
return res
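    # The dictionary returned above maps each procurement id to True when it could be attached
    # to a new or existing purchase order, and to False when no vendor was found; run() uses
    # this result to move procurements to 'running' or 'exception'.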
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
def _get_buy_route(self, cr, uid, context=None):
buy_route = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'purchase.route_warehouse0_buy')
if buy_route:
return [buy_route]
return []
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
for template in self.browse(cr, uid, ids, context=context):
res[template.id] = sum([p.purchase_count for p in template.product_variant_ids])
return res
_columns = {
'property_account_creditor_price_difference': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
_defaults = {
'purchase_ok': 1,
'route_ids': _get_buy_route,
}
def action_view_purchases(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
return result
class product_product(osv.Model):
_name = 'product.product'
_inherit = 'product.product'
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
r = dict.fromkeys(ids, 0)
domain = [
('state', 'in', ['confirmed', 'approved', 'except_picking', 'except_invoice', 'done']),
('product_id', 'in', ids),
]
for group in self.pool['purchase.report'].read_group(cr, uid, domain, ['product_id', 'quantity'], ['product_id'], context=context):
r[group['product_id'][0]] = group['quantity']
return r
def action_view_purchases(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = self.pool['product.template']._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]"
return result
_columns = {
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
class product_category(osv.Model):
_inherit = "product.category"
_columns = {
'property_account_creditor_price_difference_categ': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, auto_commit=False, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('purchase.order').signal_workflow(cr, uid, [context['default_res_id']], 'send_rfq')
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
""" Override account_invoice to add Chatter messages on the related purchase
orders, logging the invoice receipt or payment. """
_inherit = 'account.invoice'
_columns = {
'purchase_ids': fields.many2many('purchase.order', 'purchase_invoice_rel', 'invoice_id',
'purchase_id', 'Purchases', copy=False,
help="Purchases linked to this invoice")
}
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice received"), context=context)
purchase_order_obj._set_po_lines_invoiced(cr, user_id, [po_id], context=context)
return res
def confirm_paid(self, cr, uid, ids, context=None):
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice paid"), context=context)
return res
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
def move_line_get(self, cr, uid, invoice_id, context=None):
res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)
        inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
        if inv.company_id.anglo_saxon_accounting:
            if inv.type in ('in_invoice','in_refund'):
for i_line in inv.invoice_line_ids:
res.extend(self._anglo_saxon_purchase_move_lines(cr, uid, i_line, res, context=context))
return res
def _anglo_saxon_purchase_move_lines(self, cr, uid, i_line, res, context=None):
"""Return the additional move lines for purchase invoices and refunds.
i_line: An account.invoice.line object.
res: The move line entries produced so far by the parent move_line_get.
"""
inv = i_line.invoice_id
company_currency = inv.company_id.currency_id.id
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ_id and i_line.product_id.categ_id.property_stock_account_input_categ_id.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position_id or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
account_prec = inv.company_id.currency_id.decimal_places
# calculate and write down the possible price difference between invoice price and product price
for line in res:
if line.get('invl_id', 0) == i_line.id and a == line['account_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
                    # for average/fifo/lifo costing methods, fetch the real cost price from incoming moves
stock_move_obj = self.pool.get('stock.move')
valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
if valuation_stock_move:
valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
if inv.currency_id.id != company_currency:
valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
# price with discount and without tax included
price_unit = self.pool['account.tax'].compute_all(cr, uid, line['taxes'], i_line.price_unit * (1-(i_line.discount or 0.0)/100.0),
inv.currency_id.id, line['quantity'])['total_excluded']
price_line = round(valuation_price_unit * line['quantity'], account_prec)
price_diff = round(price_unit - price_line, account_prec)
line.update({'price': price_line})
diff_res.append({
'type': 'src',
'name': i_line.name[:64],
'price_unit': round(price_diff / line['quantity'], account_prec),
'quantity': line['quantity'],
'price': price_diff,
'account_id': acc,
'product_id': line['product_id'],
'uos_id': line['uos_id'],
'account_analytic_id': line['account_analytic_id'],
'taxes': line.get('taxes', []),
})
return diff_res
return []
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_ids': fields.many2many('purchase.order.line', 'purchase_order_line_invoice_rel', 'invoice_id','order_line_id',
'Purchase Order Lines', readonly=True, copy=False)
} | agpl-3.0 | 917,454,773,786,240,900 | 54.662047 | 302 | 0.586893 | false |
orlenko/plei | pleiapp/migrations/0002_auto__add_tagline.py | 1 | 19744 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tagline'
db.create_table(u'pleiapp_tagline', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'pleiapp', ['Tagline'])
# Adding M2M table for field related_resources on 'Dictionary'
m2m_table_name = db.shorten_name(u'pleiapp_dictionary_related_resources')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('dictionary', models.ForeignKey(orm[u'pleiapp.dictionary'], null=False)),
('resource', models.ForeignKey(orm[u'pleiapp.resource'], null=False))
))
db.create_unique(m2m_table_name, ['dictionary_id', 'resource_id'])
# Adding M2M table for field related_faqs on 'Dictionary'
m2m_table_name = db.shorten_name(u'pleiapp_dictionary_related_faqs')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('dictionary', models.ForeignKey(orm[u'pleiapp.dictionary'], null=False)),
('faq', models.ForeignKey(orm[u'pleiapp.faq'], null=False))
))
db.create_unique(m2m_table_name, ['dictionary_id', 'faq_id'])
# Adding M2M table for field related_resources on 'Faq'
m2m_table_name = db.shorten_name(u'pleiapp_faq_related_resources')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('faq', models.ForeignKey(orm[u'pleiapp.faq'], null=False)),
('resource', models.ForeignKey(orm[u'pleiapp.resource'], null=False))
))
db.create_unique(m2m_table_name, ['faq_id', 'resource_id'])
def backwards(self, orm):
# Deleting model 'Tagline'
db.delete_table(u'pleiapp_tagline')
# Removing M2M table for field related_resources on 'Dictionary'
db.delete_table(db.shorten_name(u'pleiapp_dictionary_related_resources'))
# Removing M2M table for field related_faqs on 'Dictionary'
db.delete_table(db.shorten_name(u'pleiapp_dictionary_related_faqs'))
# Removing M2M table for field related_resources on 'Faq'
db.delete_table(db.shorten_name(u'pleiapp_faq_related_resources'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'pleiapp.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'pleiapp.dictionary': {
'Meta': {'ordering': "('title',)", 'object_name': 'Dictionary'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_dictionary_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Dictionary']"}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Faq']", 'symmetrical': 'False', 'blank': 'True'}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Resource']", 'symmetrical': 'False', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dictionarys'", 'to': u"orm['auth.User']"})
},
u'pleiapp.faq': {
'Meta': {'ordering': "('title',)", 'object_name': 'Faq'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Dictionary']", 'symmetrical': 'False', 'blank': 'True'}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_faqs_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Faq']"}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Resource']", 'symmetrical': 'False', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'faqs'", 'to': u"orm['auth.User']"})
},
u'pleiapp.frontpageitem': {
'Meta': {'object_name': 'FrontPageItem'},
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'pleiapp.resource': {
'Meta': {'ordering': "('title',)", 'object_name': 'Resource'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Dictionary']", 'symmetrical': 'False', 'blank': 'True'}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Faq']", 'symmetrical': 'False', 'blank': 'True'}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_resources_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Resource']"}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': u"orm['auth.User']"})
},
u'pleiapp.tagline': {
'Meta': {'object_name': 'Tagline'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'pleiapp.topic': {
'Meta': {'ordering': "('title',)", 'object_name': 'Topic'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'pleiapp.type': {
'Meta': {'ordering': "('title',)", 'object_name': 'Type'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['pleiapp'] | bsd-2-clause | -8,362,666,055,092,364,000 | 80.929461 | 191 | 0.562804 | false |
leonth/private-configs | sublime-text-3/Packages/SublimePythonIDE/server/server.py | 1 | 13177 | import os
import sys
import time
import logging
import tempfile
import threading
if sys.version_info[0] == 2:
sys.path.insert(
0, os.path.join(os.path.dirname(__file__), "..", "lib", "python2"))
from SimpleXMLRPCServer import SimpleXMLRPCServer
from xmlrpclib import Binary
else:
sys.path.insert(
0, os.path.join(os.path.dirname(__file__), "..", "lib", "python3"))
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.client import Binary
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "lib"))
from linter import do_linting
from rope.base import libutils
from rope.base.project import Project
from rope.base.exceptions import ModuleSyntaxError
from rope.contrib.codeassist import (
code_assist, sorted_proposals, get_doc, get_definition_location
)
# global state of the server process
last_heartbeat = None
# constants
HEARTBEAT_TIMEOUT = 19
NO_ROOT_PATH = -1
class RopeProjectMixin(object):
"""
Creates and manages Rope projects"""
def __init__(self):
self.projects = {}
self.buffer_tmpfile_map = {}
self.tempfiles = []
def __del__(self):
        '''Clean up temporary files when the server is deallocated. Although
        Python destructors are not guaranteed to run, it is still OK to do
        cleanup here, as a tempfile surviving the server in TEMPDIR is not
        too big of a problem.'''
for tfn in self.tempfiles:
os.unlink(tfn)
def project_for(self, project_path, file_path, source=""):
# scratch buffer case: create temp file and proj for buffer and cache it
if file_path.startswith("BUFFER:"):
if file_path in self.projects:
project = self.projects[file_path]
file_path = self.buffer_tmpfile_map[file_path]
else:
original_file_path = file_path
file_path = self._create_temp_file(source)
project = self._create_single_file_project(file_path)
self.projects[original_file_path] = project
self.buffer_tmpfile_map[original_file_path] = file_path
# single file case (or scratch buffer with client not sending buffer_id)
# create temp file and proj, and buffer if file_name given
elif project_path == NO_ROOT_PATH:
if file_path in self.projects:
project = self.projects[file_path]
else:
if not file_path:
# this path is deprecated and should not be used anymore
file_path = self._create_temp_file(source)
project = self._create_single_file_project(file_path)
else:
project = self._create_single_file_project(file_path)
self.projects[file_path] = project
# "usual" case: a real file with a project directory is given
else:
if project_path in self.projects:
project = self.projects[project_path]
else:
project = self._create_project(project_path)
self.projects[project_path] = project
return project, file_path
def list_projects(self):
return self.projects.keys()
def _create_project(self, path):
project = Project(path, fscommands=None, ropefolder=None)
return project
def _create_single_file_project(self, path):
folder = os.path.dirname(path)
ignored_res = os.listdir(folder)
ignored_res.remove(os.path.basename(path))
project = Project(
folder, ropefolder=None,
ignored_resources=ignored_res, fscommands=None)
return project
def _create_temp_file(self, content):
"""
        Creates a temporary named file for use by Rope. Rope expects to
        be able to read files from disk in some places, so there is no
        easy way around creating these files. We try to delete those
        files in the server's destructor (see __del__).
"""
tmpfile = tempfile.NamedTemporaryFile(delete=False)
tmpfile.write(content.encode("utf-8"))
tf_path = tmpfile.name
self.tempfiles.append(tf_path)
tmpfile.close()
return tf_path
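# Illustrative note (not part of the original module): completions()/documentation()
# below resolve a project through project_for(). A scratch buffer with no file on
# disk is keyed by a synthetic "BUFFER:<id>" path, so such a lookup looks roughly like
#   project, real_path = self.project_for(NO_ROOT_PATH, "BUFFER:42", source="import os\n")
# where "BUFFER:42" and the source text are made-up example values.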
class RopeFunctionsMixin(object):
"""Uses Rope to generate completion proposals, depends on RopeProjectMixin
"""
def profile_completions(self, source, project_path, file_path, loc):
"""
Only for testing purposes::
runs Rope's code completion functionality in the python profiler
and saves statistics, then reruns for actual results
"""
try:
import cProfile as profile
except:
import profile
profile.runctx(
"self.completions(source, project_path, file_path, loc)",
globals(), locals(), os.path.expanduser("~/SublimePython.stats"))
return self.completions(source, project_path, file_path, loc)
def completions(self, source, project_path, file_path, loc):
"""
        Get completions from the underlying Rope library and return them
        to the editor interface.
:param source: the document source
:param project_path: the actual project_path
:param file_path: the actual file path
:param loc: the buffer location
:returns: a list of tuples of strings
"""
project, resource = self._get_resource(project_path, file_path, source)
try:
proposals = code_assist(
project, source, loc, resource=resource, maxfixes=3)
proposals = sorted_proposals(proposals)
except ModuleSyntaxError:
proposals = []
except Exception:
import traceback
traceback.print_exc()
proposals = []
finally:
proposals = [
(self._proposal_string(p), self._insert_string(p))
for p in proposals if p.name != 'self='
]
return proposals
def documentation(self, source, project_path, file_path, loc):
"""
Search for documentation about the word in the current location
:param source: the document source
:param project_path: the actual project_path
:param file_path: the actual file path
:param loc: the buffer location
:returns: a string containing the documentation
"""
project, resource = self._get_resource(project_path, file_path, source)
try:
doc = get_doc(project, source, loc, resource=resource, maxfixes=3)
except ModuleSyntaxError:
doc = None
return doc
def definition_location(self, source, project_path, file_path, loc):
"""
        Get a global definition location and return it to the editor.
:param source: the document source
:param project_path: the actual project_path
:param file_path: the actual file path
:param loc: the buffer location
:returns: a tuple containing the path and the line number
"""
project, resource = self._get_resource(project_path, file_path, source)
real_path, def_lineno = (None, None)
try:
def_resource, def_lineno = get_definition_location(
project, source, loc, resource=resource, maxfixes=3)
if def_resource:
real_path = def_resource.real_path
except ModuleSyntaxError:
pass
return real_path, def_lineno
def report_changed(self, project_path, file_path):
"""
Reports the change of the contents of file_path.
:param project_path: the actual project path
:param file_path: the file path
"""
if project_path != NO_ROOT_PATH:
project, file_path = self.project_for(project_path, file_path)
libutils.report_change(project, file_path, "")
def _proposal_string(self, p):
"""
Build and return a string for the proposals of completions
:param p: the original proposal structure
"""
if p.parameters:
params = [par for par in p.parameters if par != 'self']
result = '{name}({params})'.format(
name=p.name,
params=', '.join(param for param in params)
)
else:
result = p.name
return '{result}\t({scope}, {type})'.format(
result=result, scope=p.scope, type=p.type)
def _insert_string(self, p):
"""
"""
if p.parameters:
params = [par for par in p.parameters if par != 'self']
param_snippet = ", ".join(
"${%i:%s}" %
(idx + 1, param) for idx, param in enumerate(params))
result = "%s(%s)" % (p.name, param_snippet)
else:
result = p.name
return result
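    # Illustrative example (not part of the original code): for a proposal named
    # "foo" with parameters (self, a, b), the two helpers above produce roughly:
    #   _proposal_string -> "foo(a, b)\t(global, function)"
    #   _insert_string   -> "foo(${1:a}, ${2:b})"
    # where the scope and type depend on what Rope reports for the proposal.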
def _get_resource(self, project_path, file_path, source):
"""Get and returns project and resource objects from Rope library
"""
project, file_path = self.project_for(project_path, file_path, source)
return project, libutils.path_to_resource(project, file_path)
class HeartBeatMixin(object):
"""
Waits for heartbeat messages from SublimeText. The main thread
kills the process if no heartbeat arrived in HEARTBEAT_TIMEOUT seconds.
"""
def __init__(self):
self.heartbeat()
def heartbeat(self):
global last_heartbeat
last_heartbeat = time.time()
logging.debug('bumbum %f', last_heartbeat)
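# Illustrative sketch only (not part of the original SublimePythonIDE code): an
# editor-side client could keep this server alive as below, assuming the server
# listens on localhost at `server_port` and the call interval stays well under
# HEARTBEAT_TIMEOUT. The function is defined here purely as documentation and is
# never called by the server itself.
def _example_heartbeat_loop(server_port, interval=5):
    try:
        from xmlrpc.client import ServerProxy  # Python 3
    except ImportError:
        from xmlrpclib import ServerProxy  # Python 2
    proxy = ServerProxy('http://localhost:%d' % server_port, allow_none=True)
    while True:
        proxy.heartbeat()
        time.sleep(interval)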
class LinterMixin(object):
"""
Performs a PyFlakes and PEP8 check on the input code, returns either a
list of messages or a single syntax error in case of an error while
parsing the code. The receiver thus has to check for these two
cases.
"""
def check_syntax(self, code, encoding, lint_settings, filename):
        '''The linting mixin does not use the project_for machinery,
        but uses the linters directly.'''
try:
codes = do_linting(lint_settings, code, encoding, filename)
        except Exception:
            import traceback
            sys.stderr.write(traceback.format_exc())
            codes = []  # fall back to an empty result so pickling below cannot fail
import pickle
ret = Binary(pickle.dumps(codes))
return ret
class Server(RopeProjectMixin, HeartBeatMixin,
RopeFunctionsMixin, LinterMixin):
"""
Python's SimpleXMLRPCServer accepts just one call of
register_instance(), so this class just combines the above
mixins.
"""
def __init__(self):
RopeProjectMixin.__init__(self)
RopeFunctionsMixin.__init__(self)
HeartBeatMixin.__init__(self)
LinterMixin.__init__(self)
class DebuggingServer(Server):
"""
Prints calls and exceptions to stderr
"""
def __init__(self):
Server.__init__(self)
def _dispatch(self, method, params):
try:
sys.stderr.write("SublimePythonIDE Server is called: %s\n" % str(method))
method = getattr(self, method)
return method(*params)
except Exception as e:
sys.stderr.write("SublimePythonIDE Server Error: %s\n" % str(e))
import traceback
traceback.print_exc()
class XMLRPCServerThread(threading.Thread):
"""
Runs a SimpleXMLRPCServer in a new thread, so that the main
thread can watch for the heartbeats and kill the process if no
heartbeat messages arrive in time
:param port: the port where to listen to
:type port: int
"""
def __init__(self, port, debug):
threading.Thread.__init__(self)
self.port = port
self.daemon = True
self.debug = debug
def run(self):
self.server = SimpleXMLRPCServer(
("localhost", port), allow_none=True, logRequests=False)
# enable debugging?
if self.debug:
sys.stderr.write("SublimePythonIDE Server is starting in Debug mode\n")
self.server.register_instance(DebuggingServer())
else:
self.server.register_instance(Server())
self.server.serve_forever()
if __name__ == '__main__':
try:
# single argument to this process should be the port to listen on
port = int(sys.argv[1])
# second argument may be "--debug" in which case the server prints to stderr
debug = False
if len(sys.argv) > 2 and sys.argv[2].strip() == "--debug":
debug = True
# the SimpleXMLRPCServer is run in a new thread
server_thread = XMLRPCServerThread(port, debug)
server_thread.start()
# the main thread checks for heartbeat messages
while 1:
time.sleep(HEARTBEAT_TIMEOUT)
if time.time() - last_heartbeat > HEARTBEAT_TIMEOUT:
sys.exit()
except Exception as e:
sys.stderr.write("SublimePythonIDE Server Error: %s\n" % str(e))
import traceback
traceback.print_exc()
| mit | 5,703,854,125,502,567,000 | 32.191436 | 88 | 0.600364 | false |
zenefits/sentry | src/sentry/testutils/cases.py | 1 | 14473 | """
sentry.testutils.cases
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = (
'TestCase', 'TransactionTestCase', 'APITestCase', 'AuthProviderTestCase',
'RuleTestCase', 'PermissionTestCase', 'PluginTestCase', 'CliTestCase',
'AcceptanceTestCase',
)
import base64
import os
import os.path
import pytest
import six
import types
from click.testing import CliRunner
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import login
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, TransactionTestCase
from django.utils.importlib import import_module
from exam import before, fixture, Exam
from pkg_resources import iter_entry_points
from rest_framework.test import APITestCase as BaseAPITestCase
from six.moves.urllib.parse import urlencode
from sentry import auth
from sentry.auth.providers.dummy import DummyProvider
from sentry.constants import MODULE_ROOT
from sentry.models import GroupMeta, ProjectOption
from sentry.plugins import plugins
from sentry.rules import EventState
from sentry.utils import json
from sentry.utils.auth import SSO_SESSION_KEY
from .fixtures import Fixtures
from .helpers import AuthProvider, Feature, get_auth_header, TaskRunner, override_options
DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
class BaseTestCase(Fixtures, Exam):
urls = 'sentry.web.urls'
def assertRequiresAuthentication(self, path, method='GET'):
resp = getattr(self.client, method.lower())(path)
assert resp.status_code == 302
assert resp['Location'].startswith('http://testserver' + reverse('sentry-login'))
@before
def setup_dummy_auth_provider(self):
auth.register('dummy', DummyProvider)
self.addCleanup(auth.unregister, 'dummy', DummyProvider)
@before
def setup_session(self):
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session.save()
self.session = session
def tasks(self):
return TaskRunner()
def feature(self, name, active=True):
"""
        >>> with self.feature('feature:name'):
>>> # ...
"""
return Feature(name, active)
def auth_provider(self, name, cls):
"""
        >>> with self.auth_provider('name', Provider):
>>> # ...
"""
return AuthProvider(name, cls)
def save_session(self):
self.session.save()
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
session_cookie = settings.SESSION_COOKIE_NAME
self.client.cookies[session_cookie] = self.session.session_key
self.client.cookies[session_cookie].update(cookie_data)
def login_as(self, user, organization_id=None):
user.backend = settings.AUTHENTICATION_BACKENDS[0]
request = HttpRequest()
request.session = self.session
login(request, user)
request.user = user
if organization_id:
request.session[SSO_SESSION_KEY] = six.text_type(organization_id)
# Save the session values.
self.save_session()
def load_fixture(self, filepath):
filepath = os.path.join(
MODULE_ROOT,
'tests',
'fixtures',
filepath,
)
with open(filepath, 'rb') as fp:
return fp.read()
def _pre_setup(self):
super(BaseTestCase, self)._pre_setup()
cache.clear()
ProjectOption.objects.clear_local_cache()
GroupMeta.objects.clear_local_cache()
def _post_teardown(self):
super(BaseTestCase, self)._post_teardown()
def _makeMessage(self, data):
return json.dumps(data).encode('utf-8')
def _makePostMessage(self, data):
return base64.b64encode(self._makeMessage(data))
def _postWithHeader(self, data, key=None, secret=None, protocol=None):
if key is None:
key = self.projectkey.public_key
secret = self.projectkey.secret_key
message = self._makePostMessage(data)
with self.tasks():
resp = self.client.post(
reverse('sentry-api-store'), message,
content_type='application/octet-stream',
HTTP_X_SENTRY_AUTH=get_auth_header(
'_postWithHeader/0.0.0',
key,
secret,
protocol,
),
)
return resp
def _postCspWithHeader(self, data, key=None, **extra):
if isinstance(data, dict):
body = json.dumps({'csp-report': data})
elif isinstance(data, six.string_types):
body = data
path = reverse('sentry-api-csp-report', kwargs={'project_id': self.project.id})
path += '?sentry_key=%s' % self.projectkey.public_key
with self.tasks():
return self.client.post(
path, data=body,
content_type='application/csp-report',
HTTP_USER_AGENT=DEFAULT_USER_AGENT,
**extra
)
def _getWithReferer(self, data, key=None, referer='sentry.io', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
'sentry_data': message,
}
with self.tasks():
resp = self.client.get(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urlencode(qs)),
**headers
)
return resp
def _postWithReferer(self, data, key=None, referer='sentry.io', protocol='4'):
if key is None:
key = self.projectkey.public_key
headers = {}
if referer is not None:
headers['HTTP_REFERER'] = referer
message = self._makeMessage(data)
qs = {
'sentry_version': protocol,
'sentry_client': 'raven-js/lol',
'sentry_key': key,
}
with self.tasks():
resp = self.client.post(
'%s?%s' % (reverse('sentry-api-store', args=(self.project.pk,)), urlencode(qs)),
data=message,
content_type='application/json',
**headers
)
return resp
def options(self, options):
"""
A context manager that temporarily sets a global option and reverts
back to the original value when exiting the context.
"""
return override_options(options)
@contextmanager
def dsn(self, dsn):
"""
A context manager that temporarily sets the internal client's DSN
"""
from raven.contrib.django.models import client
try:
client.set_dsn(dsn)
yield
finally:
client.set_dsn(None)
_postWithSignature = _postWithHeader
_postWithNewSignature = _postWithHeader
class TestCase(BaseTestCase, TestCase):
pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
pass
class APITestCase(BaseTestCase, BaseAPITestCase):
pass
class AuthProviderTestCase(TestCase):
provider = DummyProvider
provider_name = 'dummy'
def setUp(self):
super(AuthProviderTestCase, self).setUp()
# TestCase automatically sets up dummy provider
if self.provider_name != 'dummy' or self.provider != DummyProvider:
auth.register(self.provider_name, self.provider)
self.addCleanup(auth.unregister, self.provider_name, self.provider)
class RuleTestCase(TestCase):
rule_cls = None
def get_event(self):
return self.event
def get_rule(self, data=None):
return self.rule_cls(
project=self.project,
data=data or {},
)
def get_state(self, **kwargs):
kwargs.setdefault('is_new', True)
kwargs.setdefault('is_regression', True)
kwargs.setdefault('is_sample', True)
return EventState(**kwargs)
def assertPasses(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is True
def assertDoesNotPass(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is False
class PermissionTestCase(TestCase):
def setUp(self):
super(PermissionTestCase, self).setUp()
self.owner = self.create_user(is_superuser=False)
self.organization = self.create_organization(
owner=self.owner,
flags=0, # disable default allow_joinleave access
)
self.team = self.create_team(organization=self.organization)
def assert_can_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 200 and resp.status_code < 300
def assert_cannot_access(self, user, path, method='GET'):
self.login_as(user)
resp = getattr(self.client, method.lower())(path)
assert resp.status_code >= 300
def assert_member_can_access(self, path):
return self.assert_role_can_access(path, 'member')
def assert_teamless_member_can_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='member', teams=[],
)
self.assert_can_access(user, path)
def assert_member_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'member')
def assert_manager_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'manager')
def assert_teamless_member_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='member', teams=[],
)
self.assert_cannot_access(user, path)
def assert_team_admin_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_teamless_admin_can_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='admin', teams=[],
)
self.assert_can_access(user, path)
def assert_team_admin_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'admin')
def assert_teamless_admin_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role='admin', teams=[],
)
self.assert_cannot_access(user, path)
def assert_team_owner_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_owner_can_access(self, path):
return self.assert_role_can_access(path, 'owner')
def assert_owner_cannot_access(self, path):
return self.assert_role_cannot_access(path, 'owner')
def assert_non_member_cannot_access(self, path):
user = self.create_user(is_superuser=False)
self.assert_cannot_access(user, path)
def assert_role_can_access(self, path, role):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role=role, teams=[self.team],
)
self.assert_can_access(user, path)
def assert_role_cannot_access(self, path, role):
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization,
role=role, teams=[self.team],
)
self.assert_cannot_access(user, path)
class PluginTestCase(TestCase):
plugin = None
def setUp(self):
super(PluginTestCase, self).setUp()
# Old plugins, plugin is a class, new plugins, it's an instance
# New plugins don't need to be registered
if isinstance(self.plugin, (type, types.ClassType)):
plugins.register(self.plugin)
self.addCleanup(plugins.unregister, self.plugin)
def assertAppInstalled(self, name, path):
for ep in iter_entry_points('sentry.apps'):
if ep.name == name:
ep_path = ep.module_name
if ep_path == path:
return
                self.fail('Found app in entry_points, but wrong class. Got %r, expected %r' % (ep_path, path))
        self.fail('Missing app from entry_points: %r' % (name,))
def assertPluginInstalled(self, name, plugin):
path = type(plugin).__module__ + ':' + type(plugin).__name__
for ep in iter_entry_points('sentry.plugins'):
if ep.name == name:
ep_path = ep.module_name + ':' + '.'.join(ep.attrs)
if ep_path == path:
return
                self.fail('Found plugin in entry_points, but wrong class. Got %r, expected %r' % (ep_path, path))
        self.fail('Missing plugin from entry_points: %r' % (name,))
class CliTestCase(TestCase):
runner = fixture(CliRunner)
command = None
default_args = []
def invoke(self, *args):
args += tuple(self.default_args)
return self.runner.invoke(self.command, args, obj={})
@pytest.mark.usefixtures('browser')
class AcceptanceTestCase(TransactionTestCase):
def save_session(self):
self.session.save()
self.browser.save_cookie(
name=settings.SESSION_COOKIE_NAME,
value=self.session.session_key,
)
| bsd-3-clause | -1,316,308,732,302,333,000 | 30.669584 | 123 | 0.609549 | false |
Louiiiss/ros_asr | build/catkin_generated/generate_cached_setup.py | 1 | 1330 | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/louis/catkin_ws/devel;/opt/ros/kinetic".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/louis/catkin_ws/src/ros_asr/build/devel/env.sh')
output_filename = '/home/louis/catkin_ws/src/ros_asr/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| gpl-2.0 | -1,022,363,894,810,627,700 | 43.333333 | 102 | 0.715038 | false |
google-research/google-research | bigg/bigg/model/tree_model.py | 1 | 22863 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.nn.parameter import Parameter
from bigg.common.pytorch_util import glorot_uniform, MLP, BinaryTreeLSTMCell
from tqdm import tqdm
from bigg.model.util import AdjNode, ColAutomata, AdjRow
from bigg.model.tree_clib.tree_lib import TreeLib
from bigg.torch_ops import multi_index_select, PosEncoding
def hc_multi_select(ids_from, ids_to, h_froms, c_froms):
h_vecs = multi_index_select(ids_from,
ids_to,
*h_froms)
c_vecs = multi_index_select(ids_from,
ids_to,
*c_froms)
return h_vecs, c_vecs
def tree_state_select(h_bot, c_bot, h_buf, c_buf, fn_all_ids):
bot_froms, bot_tos, prev_froms, prev_tos = fn_all_ids()
if h_buf is None or prev_tos is None:
h_vecs = multi_index_select([bot_froms], [bot_tos], h_bot)
c_vecs = multi_index_select([bot_froms], [bot_tos], c_bot)
elif h_bot is None or bot_tos is None:
h_vecs = multi_index_select([prev_froms], [prev_tos], h_buf)
c_vecs = multi_index_select([prev_froms], [prev_tos], c_buf)
else:
h_vecs, c_vecs = hc_multi_select([bot_froms, prev_froms],
[bot_tos, prev_tos],
[h_bot, h_buf], [c_bot, c_buf])
return h_vecs, c_vecs
def batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, fn_all_ids, cell):
h_list = []
c_list = []
for i in range(2):
h_vecs, c_vecs = tree_state_select(h_bot, c_bot, h_buf, c_buf, lambda : fn_all_ids(i))
h_list.append(h_vecs)
c_list.append(c_vecs)
return cell((h_list[0], c_list[0]), (h_list[1], c_list[1]))
def batch_tree_lstm3(h_bot, c_bot, h_buf, c_buf, h_past, c_past, fn_all_ids, cell):
if h_past is None:
return batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, lambda i: fn_all_ids(i)[:-2], cell)
elif h_bot is None:
return batch_tree_lstm2(h_buf, c_buf, h_past, c_past, lambda i: fn_all_ids(i)[2:], cell)
elif h_buf is None:
        # select the bot and past id groups (positions 0, 1, 4, 5) when h_buf is absent
        return batch_tree_lstm2(h_bot, c_bot, h_past, c_past,
                                lambda i: tuple(fn_all_ids(i)[j] for j in (0, 1, 4, 5)), cell)
else:
h_list = []
c_list = []
for i in range(2):
bot_froms, bot_tos, prev_froms, prev_tos, past_froms, past_tos = fn_all_ids(i)
h_vecs, c_vecs = hc_multi_select([bot_froms, prev_froms, past_froms],
[bot_tos, prev_tos, past_tos],
[h_bot, h_buf, h_past],
[c_bot, c_buf, c_past])
h_list.append(h_vecs)
c_list.append(c_vecs)
return cell((h_list[0], c_list[0]), (h_list[1], c_list[1]))
class FenwickTree(nn.Module):
def __init__(self, args):
super(FenwickTree, self).__init__()
self.init_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.init_c0 = Parameter(torch.Tensor(1, args.embed_dim))
glorot_uniform(self)
self.merge_cell = BinaryTreeLSTMCell(args.embed_dim)
self.summary_cell = BinaryTreeLSTMCell(args.embed_dim)
if args.pos_enc:
self.pos_enc = PosEncoding(args.embed_dim, args.device, args.pos_base)
else:
self.pos_enc = lambda x: 0
def reset(self, list_states=[]):
self.list_states = []
for l in list_states:
t = []
for e in l:
t.append(e)
self.list_states.append(t)
def append_state(self, state, level):
if level >= len(self.list_states):
num_aug = level - len(self.list_states) + 1
for i in range(num_aug):
self.list_states.append([])
self.list_states[level].append(state)
def forward(self, new_state=None):
if new_state is None:
if len(self.list_states) == 0:
return (self.init_h0, self.init_c0)
else:
self.append_state(new_state, 0)
pos = 0
while pos < len(self.list_states):
if len(self.list_states[pos]) >= 2:
lch_state, rch_state = self.list_states[pos] # assert the length is 2
new_state = self.merge_cell(lch_state, rch_state)
self.list_states[pos] = []
self.append_state(new_state, pos + 1)
pos += 1
state = None
for pos in range(len(self.list_states)):
if len(self.list_states[pos]) == 0:
continue
cur_state = self.list_states[pos][0]
if state is None:
state = cur_state
else:
state = self.summary_cell(state, cur_state)
return state
def forward_train(self, h_bot, c_bot, h_buf0, c_buf0, prev_rowsum_h, prrev_rowsum_c):
# embed row tree
tree_agg_ids = TreeLib.PrepareRowEmbed()
row_embeds = [(self.init_h0, self.init_c0)]
if h_bot is not None:
row_embeds.append((h_bot, c_bot))
if prev_rowsum_h is not None:
row_embeds.append((prev_rowsum_h, prrev_rowsum_c))
if h_buf0 is not None:
row_embeds.append((h_buf0, c_buf0))
th_bot = h_bot
tc_bot = c_bot
for i, all_ids in enumerate(tree_agg_ids):
fn_ids = lambda x: all_ids[x]
if i:
th_bot = tc_bot = None
new_states = batch_tree_lstm3(th_bot, tc_bot,
row_embeds[-1][0], row_embeds[-1][1],
prev_rowsum_h, prrev_rowsum_c,
fn_ids, self.merge_cell)
row_embeds.append(new_states)
h_list, c_list = zip(*row_embeds)
joint_h = torch.cat(h_list, dim=0)
joint_c = torch.cat(c_list, dim=0)
# get history representation
init_select, all_ids, last_tos, next_ids, pos_info = TreeLib.PrepareRowSummary()
cur_state = (joint_h[init_select], joint_c[init_select])
ret_state = (joint_h[next_ids], joint_c[next_ids])
hist_rnn_states = []
hist_froms = []
hist_tos = []
for i, (done_from, done_to, proceed_from, proceed_input) in enumerate(all_ids):
hist_froms.append(done_from)
hist_tos.append(done_to)
hist_rnn_states.append(cur_state)
next_input = joint_h[proceed_input], joint_c[proceed_input]
sub_state = cur_state[0][proceed_from], cur_state[1][proceed_from]
cur_state = self.summary_cell(sub_state, next_input)
hist_rnn_states.append(cur_state)
hist_froms.append(None)
hist_tos.append(last_tos)
hist_h_list, hist_c_list = zip(*hist_rnn_states)
pos_embed = self.pos_enc(pos_info)
row_h = multi_index_select(hist_froms, hist_tos, *hist_h_list) + pos_embed
row_c = multi_index_select(hist_froms, hist_tos, *hist_c_list) + pos_embed
return (row_h, row_c), ret_state
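# Illustrative sketch (not used by the model): the same Fenwick-style merge
# pattern on plain integers, to show how append_state/forward above maintain
# O(log n) partial summaries of everything appended so far. Here `merge_cell`
# is replaced by integer addition and `summary_cell` by a final sum.
def _fenwick_merge_example(values):
    levels = []  # levels[k] holds at most one merged block covering 2**k items
    for v in values:
        carry, k = v, 0
        while k < len(levels) and levels[k] is not None:
            carry = levels[k] + carry  # analogue of merge_cell
            levels[k] = None
            k += 1
        if k == len(levels):
            levels.append(carry)
        else:
            levels[k] = carry
    # analogue of summary_cell: fold the surviving partial blocks
    return sum(b for b in levels if b is not None)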
class BitsRepNet(nn.Module):
def __init__(self, args):
super(BitsRepNet, self).__init__()
self.bits_compress = args.bits_compress
self.out_dim = args.embed_dim
assert self.out_dim >= self.bits_compress
self.device = args.device
def forward(self, on_bits, n_cols):
h = torch.zeros(1, self.out_dim).to(self.device)
h[0, :n_cols] = -1.0
h[0, on_bits] = 1.0
return h, h
class RecurTreeGen(nn.Module):
def __init__(self, args):
super(RecurTreeGen, self).__init__()
self.directed = args.directed
self.self_loop = args.self_loop
self.bits_compress = args.bits_compress
self.greedy_frac = args.greedy_frac
self.share_param = args.share_param
if not self.bits_compress:
self.leaf_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.leaf_c0 = Parameter(torch.Tensor(1, args.embed_dim))
self.empty_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.empty_c0 = Parameter(torch.Tensor(1, args.embed_dim))
self.topdown_left_embed = Parameter(torch.Tensor(2, args.embed_dim))
self.topdown_right_embed = Parameter(torch.Tensor(2, args.embed_dim))
glorot_uniform(self)
if self.bits_compress > 0:
self.bit_rep_net = BitsRepNet(args)
if self.share_param:
self.m_l2r_cell = BinaryTreeLSTMCell(args.embed_dim)
self.lr2p_cell = BinaryTreeLSTMCell(args.embed_dim)
self.pred_has_ch = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_pred_has_left = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_pred_has_right = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_cell_topdown = nn.LSTMCell(args.embed_dim, args.embed_dim)
self.m_cell_topright = nn.LSTMCell(args.embed_dim, args.embed_dim)
else:
fn_pred = lambda: MLP(args.embed_dim, [2 * args.embed_dim, 1])
fn_tree_cell = lambda: BinaryTreeLSTMCell(args.embed_dim)
fn_lstm_cell = lambda: nn.LSTMCell(args.embed_dim, args.embed_dim)
num_params = int(np.ceil(np.log2(args.max_num_nodes))) + 1
self.pred_has_ch = fn_pred()
pred_modules = [[] for _ in range(2)]
tree_cell_modules = []
lstm_cell_modules = [[] for _ in range(2)]
for _ in range(num_params):
for i in range(2):
pred_modules[i].append(fn_pred())
lstm_cell_modules[i].append(fn_lstm_cell())
tree_cell_modules.append(fn_tree_cell())
self.has_left_modules, self.has_right_modules = [nn.ModuleList(l) for l in pred_modules]
self.l2r_modules= nn.ModuleList(tree_cell_modules)
self.cell_topdown_modules, self.cell_topright_modules = [nn.ModuleList(l) for l in lstm_cell_modules]
self.lr2p_cell = fn_tree_cell()
self.row_tree = FenwickTree(args)
if args.tree_pos_enc:
self.tree_pos_enc = PosEncoding(args.embed_dim, args.device, args.pos_base, bias=np.pi / 4)
else:
self.tree_pos_enc = lambda x: 0
def cell_topdown(self, x, y, lv):
cell = self.m_cell_topdown if self.share_param else self.cell_topdown_modules[lv]
return cell(x, y)
def cell_topright(self, x, y, lv):
cell = self.m_cell_topright if self.share_param else self.cell_topright_modules[lv]
return cell(x, y)
def l2r_cell(self, x, y, lv):
cell = self.m_l2r_cell if self.share_param else self.l2r_modules[lv]
return cell(x, y)
def pred_has_left(self, x, lv):
mlp = self.m_pred_has_left if self.share_param else self.has_left_modules[lv]
return mlp(x)
def pred_has_right(self, x, lv):
mlp = self.m_pred_has_right if self.share_param else self.has_right_modules[lv]
return mlp(x)
def get_empty_state(self):
if self.bits_compress:
return self.bit_rep_net([], 1)
else:
return (self.empty_h0, self.empty_c0)
def get_prob_fix(self, prob):
p = prob * (1 - self.greedy_frac)
if prob >= 0.5:
p += self.greedy_frac
return p
def gen_row(self, ll, state, tree_node, col_sm, lb, ub):
assert lb <= ub
if tree_node.is_root:
prob_has_edge = torch.sigmoid(self.pred_has_ch(state[0]))
if col_sm.supervised:
has_edge = len(col_sm.indices) > 0
else:
has_edge = np.random.rand() < self.get_prob_fix(prob_has_edge.item())
if ub == 0:
has_edge = False
if tree_node.n_cols <= 0:
has_edge = False
if lb:
has_edge = True
if has_edge:
ll = ll + torch.log(prob_has_edge)
else:
ll = ll + torch.log(1 - prob_has_edge)
tree_node.has_edge = has_edge
else:
assert ub > 0
tree_node.has_edge = True
if not tree_node.has_edge: # an empty tree
return ll, self.get_empty_state(), 0
if tree_node.is_leaf:
tree_node.bits_rep = [0]
col_sm.add_edge(tree_node.col_range[0])
if self.bits_compress:
return ll, self.bit_rep_net(tree_node.bits_rep, tree_node.n_cols), 1
else:
return ll, (self.leaf_h0, self.leaf_c0), 1
else:
tree_node.split()
mid = (tree_node.col_range[0] + tree_node.col_range[1]) // 2
left_prob = torch.sigmoid(self.pred_has_left(state[0], tree_node.depth))
if col_sm.supervised:
has_left = col_sm.next_edge < mid
else:
has_left = np.random.rand() < self.get_prob_fix(left_prob.item())
if ub == 0:
has_left = False
if lb > tree_node.rch.n_cols:
has_left = True
ll = ll + (torch.log(left_prob) if has_left else torch.log(1 - left_prob))
left_pos = self.tree_pos_enc([tree_node.lch.n_cols])
state = self.cell_topdown(self.topdown_left_embed[[int(has_left)]] + left_pos, state, tree_node.depth)
if has_left:
lub = min(tree_node.lch.n_cols, ub)
llb = max(0, lb - tree_node.rch.n_cols)
ll, left_state, num_left = self.gen_row(ll, state, tree_node.lch, col_sm, llb, lub)
else:
left_state = self.get_empty_state()
num_left = 0
right_pos = self.tree_pos_enc([tree_node.rch.n_cols])
topdown_state = self.l2r_cell(state, (left_state[0] + right_pos, left_state[1] + right_pos), tree_node.depth)
rlb = max(0, lb - num_left)
rub = min(tree_node.rch.n_cols, ub - num_left)
if not has_left:
has_right = True
else:
right_prob = torch.sigmoid(self.pred_has_right(topdown_state[0], tree_node.depth))
if col_sm.supervised:
has_right = col_sm.has_edge(mid, tree_node.col_range[1])
else:
has_right = np.random.rand() < self.get_prob_fix(right_prob.item())
if rub == 0:
has_right = False
if rlb:
has_right = True
ll = ll + (torch.log(right_prob) if has_right else torch.log(1 - right_prob))
topdown_state = self.cell_topright(self.topdown_right_embed[[int(has_right)]], topdown_state, tree_node.depth)
if has_right: # has edge in right child
ll, right_state, num_right = self.gen_row(ll, topdown_state, tree_node.rch, col_sm, rlb, rub)
else:
right_state = self.get_empty_state()
num_right = 0
if tree_node.col_range[1] - tree_node.col_range[0] <= self.bits_compress:
summary_state = self.bit_rep_net(tree_node.bits_rep, tree_node.n_cols)
else:
summary_state = self.lr2p_cell(left_state, right_state)
return ll, summary_state, num_left + num_right
def forward(self, node_end, edge_list=None, node_start=0, list_states=[], lb_list=None, ub_list=None, col_range=None, num_nodes=None, display=False):
pos = 0
total_ll = 0.0
edges = []
self.row_tree.reset(list_states)
controller_state = self.row_tree()
if num_nodes is None:
num_nodes = node_end
pbar = range(node_start, node_end)
if display:
pbar = tqdm(pbar)
for i in pbar:
if edge_list is None:
col_sm = ColAutomata(supervised=False)
else:
indices = []
while pos < len(edge_list) and i == edge_list[pos][0]:
indices.append(edge_list[pos][1])
pos += 1
indices.sort()
col_sm = ColAutomata(supervised=True, indices=indices)
cur_row = AdjRow(i, self.directed, self.self_loop, col_range=col_range)
lb = 0 if lb_list is None else lb_list[i]
ub = cur_row.root.n_cols if ub_list is None else ub_list[i]
cur_pos_embed = self.row_tree.pos_enc([num_nodes - i])
controller_state = [x + cur_pos_embed for x in controller_state]
ll, cur_state, _ = self.gen_row(0, controller_state, cur_row.root, col_sm, lb, ub)
assert lb <= len(col_sm.indices) <= ub
controller_state = self.row_tree(cur_state)
edges += [(i, x) for x in col_sm.indices]
total_ll = total_ll + ll
return total_ll, edges, self.row_tree.list_states
def binary_ll(self, pred_logits, np_label, need_label=False, reduction='sum'):
pred_logits = pred_logits.view(-1, 1)
label = torch.tensor(np_label, dtype=torch.float32).to(pred_logits.device).view(-1, 1)
loss = F.binary_cross_entropy_with_logits(pred_logits, label, reduction=reduction)
if need_label:
return -loss, label
return -loss
def forward_row_trees(self, graph_ids, list_node_starts=None, num_nodes=-1, list_col_ranges=None):
TreeLib.PrepareMiniBatch(graph_ids, list_node_starts, num_nodes, list_col_ranges)
# embed trees
all_ids = TreeLib.PrepareTreeEmbed()
if not self.bits_compress:
h_bot = torch.cat([self.empty_h0, self.leaf_h0], dim=0)
c_bot = torch.cat([self.empty_c0, self.leaf_c0], dim=0)
fn_hc_bot = lambda d: (h_bot, c_bot)
else:
binary_embeds, base_feat = TreeLib.PrepareBinary()
fn_hc_bot = lambda d: (binary_embeds[d], binary_embeds[d]) if d < len(binary_embeds) else base_feat
max_level = len(all_ids) - 1
h_buf_list = [None] * (len(all_ids) + 1)
c_buf_list = [None] * (len(all_ids) + 1)
for d in range(len(all_ids) - 1, -1, -1):
fn_ids = lambda i: all_ids[d][i]
if d == max_level:
h_buf = c_buf = None
else:
h_buf = h_buf_list[d + 1]
c_buf = c_buf_list[d + 1]
h_bot, c_bot = fn_hc_bot(d + 1)
new_h, new_c = batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, fn_ids, self.lr2p_cell)
h_buf_list[d] = new_h
c_buf_list[d] = new_c
return fn_hc_bot, h_buf_list, c_buf_list
def forward_row_summaries(self, graph_ids, list_node_starts=None, num_nodes=-1, prev_rowsum_states=[None, None], list_col_ranges=None):
fn_hc_bot, h_buf_list, c_buf_list = self.forward_row_trees(graph_ids, list_node_starts, num_nodes, list_col_ranges)
row_states, next_states = self.row_tree.forward_train(*(fn_hc_bot(0)), h_buf_list[0], c_buf_list[0], *prev_rowsum_states)
return row_states, next_states
def forward_train(self, graph_ids, list_node_starts=None, num_nodes=-1, prev_rowsum_states=[None, None], list_col_ranges=None):
fn_hc_bot, h_buf_list, c_buf_list = self.forward_row_trees(graph_ids, list_node_starts, num_nodes, list_col_ranges)
row_states, next_states = self.row_tree.forward_train(*(fn_hc_bot(0)), h_buf_list[0], c_buf_list[0], *prev_rowsum_states)
# make prediction
logit_has_edge = self.pred_has_ch(row_states[0])
has_ch, _ = TreeLib.GetChLabel(0, dtype=np.bool)
ll = self.binary_ll(logit_has_edge, has_ch)
# has_ch_idx
cur_states = (row_states[0][has_ch], row_states[1][has_ch])
lv = 0
while True:
is_nonleaf = TreeLib.QueryNonLeaf(lv)
if is_nonleaf is None or np.sum(is_nonleaf) == 0:
break
cur_states = (cur_states[0][is_nonleaf], cur_states[1][is_nonleaf])
left_logits = self.pred_has_left(cur_states[0], lv)
has_left, num_left = TreeLib.GetChLabel(-1, lv)
left_update = self.topdown_left_embed[has_left] + self.tree_pos_enc(num_left)
left_ll, float_has_left = self.binary_ll(left_logits, has_left, need_label=True, reduction='sum')
ll = ll + left_ll
cur_states = self.cell_topdown(left_update, cur_states, lv)
left_ids = TreeLib.GetLeftRootStates(lv)
h_bot, c_bot = fn_hc_bot(lv + 1)
if lv + 1 < len(h_buf_list):
h_next_buf, c_next_buf = h_buf_list[lv + 1], c_buf_list[lv + 1]
else:
h_next_buf = c_next_buf = None
left_subtree_states = tree_state_select(h_bot, c_bot,
h_next_buf, c_next_buf,
lambda: left_ids)
has_right, num_right = TreeLib.GetChLabel(1, lv)
right_pos = self.tree_pos_enc(num_right)
left_subtree_states = [x + right_pos for x in left_subtree_states]
topdown_state = self.l2r_cell(cur_states, left_subtree_states, lv)
right_logits = self.pred_has_right(topdown_state[0], lv)
right_update = self.topdown_right_embed[has_right]
topdown_state = self.cell_topright(right_update, topdown_state, lv)
right_ll = self.binary_ll(right_logits, has_right, reduction='none') * float_has_left
ll = ll + torch.sum(right_ll)
lr_ids = TreeLib.GetLeftRightSelect(lv, np.sum(has_left), np.sum(has_right))
new_states = []
for i in range(2):
new_s = multi_index_select([lr_ids[0], lr_ids[2]], [lr_ids[1], lr_ids[3]],
cur_states[i], topdown_state[i])
new_states.append(new_s)
cur_states = tuple(new_states)
lv += 1
return ll, next_states
| apache-2.0 | 8,929,886,291,118,226,000 | 42.715105 | 153 | 0.55264 | false |
theislab/scanpy | scanpy/tests/test_ingest.py | 1 | 3770 | import pytest
import numpy as np
from sklearn.neighbors import KDTree
from umap import UMAP
import scanpy as sc
from scanpy import settings
from scanpy._compat import pkg_version
X = np.array(
[
[1.0, 2.5, 3.0, 5.0, 8.7],
[4.2, 7.0, 9.0, 11.0, 7.0],
[5.1, 2.0, 9.0, 4.0, 9.0],
[7.0, 9.4, 6.8, 9.1, 8.0],
[8.9, 8.6, 9.6, 1.0, 2.0],
[6.5, 8.9, 2.2, 4.5, 8.9],
]
)
T = np.array([[2.0, 3.5, 4.0, 1.0, 4.7], [3.2, 2.0, 5.0, 5.0, 8.0]])
@pytest.fixture
def adatas():
pbmc = sc.datasets.pbmc68k_reduced()
n_split = 500
adata_ref = sc.AnnData(pbmc.X[:n_split, :], obs=pbmc.obs.iloc[:n_split])
adata_new = sc.AnnData(pbmc.X[n_split:, :])
sc.pp.pca(adata_ref)
sc.pp.neighbors(adata_ref)
sc.tl.umap(adata_ref)
return adata_ref, adata_new
def test_representation(adatas):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
assert ing._use_rep == 'X_pca'
assert ing._obsm['rep'].shape == (adata_new.n_obs, settings.N_PCS)
assert ing._pca_centered
sc.pp.pca(adata_ref, n_comps=30, zero_center=False)
sc.pp.neighbors(adata_ref)
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
assert ing._use_rep == 'X_pca'
assert ing._obsm['rep'].shape == (adata_new.n_obs, 30)
assert not ing._pca_centered
sc.pp.neighbors(adata_ref, use_rep='X')
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
assert ing._use_rep == 'X'
assert ing._obsm['rep'] is adata_new.X
def test_neighbors(adatas):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
ing.neighbors(k=10)
indices = ing._indices
tree = KDTree(adata_ref.obsm['X_pca'])
true_indices = tree.query(ing._obsm['rep'], 10, return_distance=False)
num_correct = 0.0
for i in range(adata_new.n_obs):
num_correct += np.sum(np.in1d(true_indices[i], indices[i]))
percent_correct = num_correct / (adata_new.n_obs * 10)
assert percent_correct > 0.99
@pytest.mark.parametrize('n', [3, 4])
def test_neighbors_defaults(adatas, n):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
sc.pp.neighbors(adata_ref, n_neighbors=n)
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
ing.neighbors()
assert ing._indices.shape[1] == n
@pytest.mark.skipif(
pkg_version("anndata") < sc.tl._ingest.ANNDATA_MIN_VERSION,
reason="`AnnData.concatenate` does not concatenate `.obsm` in old anndata versions",
)
def test_ingest_function(adatas):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
sc.tl.ingest(
adata_new,
adata_ref,
obs='bulk_labels',
embedding_method=['umap', 'pca'],
inplace=True,
)
assert 'bulk_labels' in adata_new.obs
assert 'X_umap' in adata_new.obsm
assert 'X_pca' in adata_new.obsm
ad = sc.tl.ingest(
adata_new,
adata_ref,
obs='bulk_labels',
embedding_method=['umap', 'pca'],
inplace=False,
)
assert 'bulk_labels' in ad.obs
assert 'X_umap' in ad.obsm
assert 'X_pca' in ad.obsm
def test_ingest_map_embedding_umap():
adata_ref = sc.AnnData(X)
adata_new = sc.AnnData(T)
sc.pp.neighbors(
adata_ref, method='umap', use_rep='X', n_neighbors=4, random_state=0
)
sc.tl.umap(adata_ref, random_state=0)
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
ing.map_embedding(method='umap')
reducer = UMAP(min_dist=0.5, random_state=0, n_neighbors=4)
reducer.fit(X)
umap_transformed_t = reducer.transform(T)
assert np.allclose(ing._obsm['X_umap'], umap_transformed_t)
| bsd-3-clause | 1,639,938,025,560,681,500 | 23.640523 | 88 | 0.602918 | false |
Azure/azure-sdk-for-python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_product_policy_operations.py | 1 | 21264 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ProductPolicyOperations(object):
"""ProductPolicyOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyCollection"
"""Get the policy configuration at the Product level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyCollection, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.PolicyCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.list_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies'} # type: ignore
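    # Illustrative usage sketch (not part of the generated client): assuming an
    # authenticated ApiManagementClient instance named `client` and placeholder
    # resource names, the operation above would typically be reached as
    #   policies = client.product_policy.list_by_product("my-rg", "my-apim", "starter")
    #   for policy in policies.value:
    #       print(policy.name)
    # where "my-rg", "my-apim" and "starter" are example values only.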
def get_entity_tag(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
policy_id, # type: Union[str, "_models.PolicyIdName"]
**kwargs # type: Any
):
# type: (...) -> bool
"""Get the ETag of the policy configuration at the Product level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param policy_id: The identifier of the Policy.
:type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_tag.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'policyId': self._serialize.url("policy_id", policy_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}'} # type: ignore
def get(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
policy_id, # type: Union[str, "_models.PolicyIdName"]
format="xml", # type: Optional[Union[str, "_models.PolicyExportFormat"]]
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyContract"
"""Get the policy configuration at the Product level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param policy_id: The identifier of the Policy.
:type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
:param format: Policy Export Format.
:type format: str or ~azure.mgmt.apimanagement.models.PolicyExportFormat
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.PolicyContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'policyId': self._serialize.url("policy_id", policy_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if format is not None:
query_parameters['format'] = self._serialize.query("format", format, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('PolicyContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
policy_id, # type: Union[str, "_models.PolicyIdName"]
parameters, # type: "_models.PolicyContract"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyContract"
"""Creates or updates policy configuration for the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param policy_id: The identifier of the Policy.
:type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
:param parameters: The policy contents to apply.
:type parameters: ~azure.mgmt.apimanagement.models.PolicyContract
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.PolicyContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'policyId': self._serialize.url("policy_id", policy_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PolicyContract')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('PolicyContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('PolicyContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
policy_id, # type: Union[str, "_models.PolicyIdName"]
if_match, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the policy configuration at the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param policy_id: The identifier of the Policy.
:type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'policyId': self._serialize.url("policy_id", policy_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}'} # type: ignore
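# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the generated operations
# class).  A minimal example of driving the four product-policy operations
# defined above through the service client.  The client class name and the
# `product_policy` attribute are assumptions about the surrounding SDK and
# may differ between releases; the keyword arguments themselves match the
# method signatures in this class.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.apimanagement import ApiManagementClient
#
#     client = ApiManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     ops = client.product_policy
#     common = dict(resource_group_name="my-rg", service_name="my-apim",
#                   product_id="my-product", policy_id="policy")
#
#     exists = ops.get_entity_tag(**common)   # True when the policy exists
#     contract = ops.get(**common)            # PolicyContract with the policy body
#     ops.create_or_update(parameters=contract, **common)  # pass if_match when updating
#     ops.delete(if_match="*", **common)      # "*" means unconditional delete
# ---------------------------------------------------------------------------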
| mit | 2,958,798,404,665,335,000 | 49.992806 | 222 | 0.644987 | false |
mobarski/sandbox | rsm/v10/common2.py | 1 | 1795 | from __future__ import print_function
import numpy as np
from random import shuffle, random, seed
from time import time
from heapq import nlargest
from collections import deque,Counter
from itertools import cycle
import marshal
from pprint import pprint
import sys
def combinations(n,k):
"return k from n combination"
out = list(range(n))
shuffle(out)
return out[:k]
def random_vector(n,lo=0,hi=1):
"return 1d uniform random vector"
return np.random.randint(lo,hi+1,n)
def random_sparse_vector(n,lo=0,hi=1,d=0.1,k=None):
"return 1d random vector with some of its values set to zero"
sparse = np.zeros(n)
k = k or int(d*n)
positions = combinations(n,k)
sparse[list(positions)] = random_vector(k,lo+1,hi)
return sparse
def top(k,d,items=False,values=False):
"return k elements with largest values from dictionary"
if items:
return nlargest(k,((x,d[x]) for x in d),key=lambda x:x[1])
elif values:
return nlargest(k,d.values())
else:
return nlargest(k,d,key=lambda x:d[x])
def clock(label,t0,t1=None):
"print execution time"
	dt = time() - t0 if t1 is None else t1 - t0
print("{:.3f}\t{}".format(dt,label))
def avg(v):
"average"
return 1.0*sum(v)/len(v)
def gini(data):
"gini index"
g = 0
for a in data:
for b in data:
g += abs(a-b)
return float(g)/(2.0*len(data)*sum(data))
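# Worked example for gini(): with data = [0, 0, 0, 1] the ordered pairwise
# absolute differences sum to 6 and 2.0 * len(data) * sum(data) == 8, so
# gini([0, 0, 0, 1]) == 0.75; a perfectly even list such as [1, 1, 1, 1]
# yields 0.0.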
def pick(v_set,n):
"select n random values from a set"
if n<=0: return []
out = list(v_set)
shuffle(out)
return out[:n]
if __name__=="__main__":
x = random_vector(30,0,1)
print(x)
y = random_vector(30,0,1)
print(y)
print(x+y)
print(combinations(10,5))
d = dict(enumerate(x+y))
print(top(3,d,values=True))
print(top(2,dict(a=1,b=2,c=3),values=True))
print(top(2,dict(a=1,b=2,c=3),values=False))
print(random_sparse_vector(20,d=0.2))
print(random_sparse_vector(20,k=10))
| mit | -8,747,349,751,396,877,000 | 22.618421 | 62 | 0.687465 | false |
CroMarmot/MyOICode | ProjectEuler/p233.py | 1 | 5380 | p = dict()
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def main():
    # Project Euler 233: look for candidate values N whose circumscribing
    # circle passes through 420 lattice points; 420 / 4 = 105 per quarter-turn
    # symmetry class.
    p[1] = 1
    m = 2
    maxv = 2*10**7
    # Build p: for every coprime pair (n, m) with 0 < n < m and n*n + 2*n*m > m*m,
    # count how many such pairs produce the value n*n + m*m (halved when even);
    # the associated legs are n*n + 2*m*n - m*m and m*m + 2*m*n - n*n.
    while m * m < maxv:
        n = m - 1
        while n > 0 and n*n - m*m + 2*n*m > 0:
            if gcd(n, m) != 1:
                n -= 1
                continue
            n2m2 = n*n + m*m
            if n2m2 % 2 == 0:
                n2m2 //= 2
            if n2m2 <= maxv:
                if n2m2 in p:
                    p[n2m2] += 2
                else:
                    p[n2m2] = 2
            n -= 1
        m += 1
    print("finish p")
    # For every odd i below maxv, sum the recorded counts over all divisors of i;
    # report i whenever 4 * s reaches the target of 420.
    for i in range(1, maxv):
        if i % 2 == 0:
            continue
        s = 0
        for j in range(1, 10**4):
            if j * j > i:
                break
            if i % j == 0 and j <= i // j:
                if j in p:
                    s += p[j]
                if i // j > j and i // j in p:
                    s += p[i // j]
        if 4 * s == 420:
            print(i, 4 * s)
main()
print("end")
"""
359125 420
469625 420
612625 420
781625 420
866125 420
933725 420
1047625 420
1077375 420
1119625 420
1288625 420
1336625 420
1366625 420
1408875 420
1481125 420
1542125 420
1592825 420
1596725 420
1787125 420
1837875 420
1880125 420
1914625 420
2032225 420
2049125 420
2133625 420
2203625 420
2224625 420
2251925 420
2302625 420
2344875 420
2387125 420
2513875 420
2598375 420
2637125 420
2731625 420
2801175 420
2894125 420
2909125 420
2911025 420
3142875 420
3147625 420
3174665 420
3215125 420
3232125 420
3287375 420
3316625 420
3350425 420
3358875 420
3504125 420
3561925 420
3572125 420
3648625 420
3654625 420
3823625 420
3865875 420
3889625 420
3937625 420
3950375 420
4009525 420
4009875 420
4077125 420
4082125 420
4099875 420
4151485 420
4161625 420
4226625 420
4288375 420
4310125 420
4443375 420
4544525 420
4564625 420
4626375 420
4778475 420
4790175 420
4837625 420
4888325 420
4922125 420
4949125 420
4962625 420
5035825 420
5091125 420
5165875 420
5327725 420
5361375 420
5382625 420
5429125 420
5471375 420
5513625 420
5547425 420
5571625 420
5640375 420
5671625 420
5682625 420
5743875 420
5851625 420
5936125 420
5969125 420
5986825 420
6046625 420
6062875 420
6093625 420
6096675 420
6147375 420
6189625 420
6206525 420
6249625 420
6400875 420
6412625 420
6509725 420
6536075 420
6538625 420
6610875 420
6612125 420
6673875 420
6696625 420
6738875 420
6755775 420
6823375 420
6907875 420
6972125 420
7016125 420
7034625 420
7116625 420
7119125 420
7161375 420
7333375 420
7372625 420
7457125 420
7492325 420
7524725 420
7541625 420
7674125 420
7774625 420
7795125 420
7837375 420
7879625 420
7907125 420
7911375 420
7926425 420
8183825 420
8194875 420
8217625 420
8259875 420
8272625 420
8386625 420
8403525 420
8417125 420
8471125 420
8597875 420
8623225 420
8640125 420
8659625 420
8682375 420
8706125 420
8727375 420
8733075 420
8893625 420
8922875 420
8966225 420
9020375 420
9069625 420
9147125 420
9238385 420
9284125 420
9356125 420
9356375 420
9428625 420
9442875 420
9485125 420
9502025 420
9523995 420
9527375 420
9566375 420
9645375 420
9654125 420
9696375 420
9717625 420
9738625 420
9862125 420
9941425 420
9949875 420
10006625 420
10051275 420
10076625 420
10151125 420
10182625 420
10197125 420
10270975 420
10365325 420
10367875 420
10438625 420
10512375 420
10584625 420
10600525 420
10617625 420
10685775 420
10716375 420
10752625 420
10794875 420
10801375 420
10820225 420
10931425 420
10945875 420
10963875 420
11006125 420
11132875 420
11136625 420
11149775 420
11177075 420
11307125 420
11324125 420
11428625 420
11451625 420
11458625 420
11470875 420
11523875 420
11597625 420
11639875 420
11668875 420
11766625 420
11812875 420
11851125 420
11879125 420
11914025 420
12020125 420
12028575 420
12029625 420
12189125 420
12231375 420
12246375 420
12299625 420
12315875 420
12405325 420
12454455 420
12484875 420
12492125 420
12509875 420
12527125 420
12607625 420
12679875 420
12752125 420
12797525 420
12817625 420
12865125 420
12930375 420
12949625 420
12991625 420
13160875 420
13236925 420
13330125 420
13402375 420
13633575 420
13693875 420
13879125 420
13879225 420
14090375 420
14174875 420
14225575 420
14335425 420
14343875 420
14370525 420
14512875 420
14558375 420
14664975 420
14702875 420
14766375 420
14775125 420
14847375 420
14850875 420
14887875 420
14935375 420
15032875 420
15107475 420
15208625 420
15230125 420
15273375 420
15425375 420
15433925 420
15442375 420
15497625 420
15572375 420
15763475 420
15983175 420
16084125 420
16118375 420
16147875 420
16287375 420
16292375 420
16414125 420
16456375 420
16540875 420
16642275 420
16709875 420
16714875 420
16878875 420
16921125 420
16963375 420
17014875 420
17047875 420
17231625 420
17521075 420
17554875 420
17563975 420
17597125 420
17740775 420
17808375 420
17907375 420
17960475 420
17977375 420
18139875 420
18186625 420
18188625 420
18280875 420
18290025 420
18300925 420
18442125 420
18459875 420
18568875 420
18619575 420
18748875 420
18991375 420
19121375 420
19202625 420
19237875 420
19529175 420
19608225 420
19615875 420
19658375 420
19832625 420
19836375 420
19904875 420
19920875 420
"""
| gpl-3.0 | 8,071,894,136,991,945,000 | 12.830334 | 69 | 0.712268 | false |
khile/botbot | botbot/bangs.py | 1 | 88080 | BANG_DICT = {
"!allegro": "Allegro",
"!cakebook": "CakePHP Cookbook",
"!animeka": "Animeka",
"!webcams": "webcams.travel",
"!ipernity": "Ipernity",
"!trademe": "TradeMe",
"!anidb": "aniDB",
"!nuget": "nuget gallery",
"!pgp": "MIT PGP Public Key Server Lookup",
"!endthelie": "End the Lie",
"!fuzz": "FuzzFind",
"!slate": "Slate",
"!wikipedia": "Wikipedia",
"!wsjmw": "http://www.marketwatch.com/",
"!wosn": "webOS Nation",
"!cbsnews": "CBSNews",
"!wosi": "WebOS Internals",
"!emy": "ebay.com.my",
"!searchforphp": "SearchForPHP",
"!emc": "electronic medicines compendium",
"!bondfaro": "Bondfaro",
"!gpt": "Google PT",
"!gps": "Google Product Search",
"!sas": "SAS",
"!gamesradar": "GamesRadar",
"!qme": "quickmeme",
"!espacenet": "Espacenet",
"!twn": "The Weather Network",
"!uscript": "Unity Script Reference",
"!engadget": "Engadget",
"!dvdfr": "DVDFr",
"!najdi": "Najdi.si",
"!issuu": "Issuu.com",
"!ocal": "Open Clip Art Library",
"!cooks": "Cooks.com",
"!vmkb": "VMware Knowledge Base",
"!wowarmoryeu": "WOW Armory EU",
"!scifi": "Worlds Without End",
"!vgg": "viagogo",
"!everymac": "EveryMac.com",
"!vgd": "v.gd",
"!heise": "www.heise.de",
"!validate": "W3C Validator",
"!movielike": "Movielike",
"!allelec": "All Electronics",
"!n": "Google News",
"!veoh": "Veoh",
"!m": "Google Maps",
"!videosift": "Videosift",
"!i": "Google Images",
"!g": "Google",
"!heinzelnisse": "Heinzelnisse",
"!e": "eBay",
"!b": "Bing",
"!price": "Amazon.com",
"!a": "Amazon.com",
"!isohunt": "ISOHunt",
"!unscatter": "Unscatter.com",
"!btdigg": "BtDigg",
"!vin": "Decode This!",
"!v": "YouTube",
"!ter": "TYPO3 Extension Repository",
"!t": "Thesaurus.com",
"!r": "Reddit",
"!s": "startpage.com",
"!nico": "Nico Nico Douga",
"!q": "Quora",
"!perseus": "Perseus Digital Library",
"!cache": "Google Cache",
"!ontrac": "OnTrac",
"!powiki": "Pokemon Online Wiki",
"!lyricswiki": "LyricWiki",
"!killerstartups": "killerstartups.com",
"!eurogamer": "Eurogamer.net",
"!glocal": "Google Local",
"!arte": "http://arte.tv",
"!cpdl": "Choral Public Domain Library",
"!gisoff": "Google Images Safe Off",
"!ip": "WhatIsMyIPAddress.com",
"!nasa": "nasa",
"!conj": "Le Conjugueur",
"!nautiljon": "Nautiljon",
"!cpp": "Cplusplus.com",
"!amfr": "Amazon.fr",
"!boardman": "Boardman Perspective",
"!weheartit": "We Heart It",
"!tweakers": "Tweakers.net",
"!5": "Fiverr",
"!jdk": "Java Docs",
"!lp": "Launchpad",
"!gamezebo": "Gamezebo",
"!fenopy": "Fenopy",
"!lv": "Livermore library",
"!inab": "INeedaBargain.com",
"!oxl": "Oxford Advanced Learner Dictionary",
"!tuaw": "The Unofficial Apple Weblog TUAW",
"!lavva": "Lavva",
"!lh": "Lifehacker",
"!lj": "LiveJournal",
"!lm": "LiveMixtapes",
"!ll": "LiveLeak",
"!gamesandgeeks": "Games and Geeks",
"!cnrtlm": "Centre National de Ressources Textuelles et Lexicales",
"!nes": "Nationalencyklopedin",
"!cnrtls": "Centre National de Ressources Textuelles et Lexicales",
"!hackaday": "HackADay",
"!cake2": "Cakephp 2 Api",
"!wref": "Wordreference en->fr",
"!devapple": "Apple Developer",
"!alternate": "Alternate",
"!wren": "WordReference",
"!icon": "IconFinder",
"!wiktionary": "Wiktionary",
"!wres": "Wordreference",
"!toppreise": "Toppreise.ch",
"!catholic": "Catholic Answers",
"!fcs": "Fulton County Schools",
"!nationalgeographic": "National Geographic",
"!opensuse": "OpenSUSE",
"!vandale": "Van Dale",
"!gnde": "Google News Germany",
"!gorp": "Gorp",
"!codeweavers": "Codeweavers Crossover application database",
"!oolone": "Oolone",
"!siteslike": "siteslike",
"!ao3": "Archive of Our Own",
"!wunderground": "Weather Underground",
"!180": "180.no",
"!golem": "Golem.de",
"!emusic": "eMusic",
"!fluidinfo": "fluidinfo.com",
"!shopwiki": "Shopwiki",
"!jvcom": "jeuxvideo.com",
"!gplusp": "Google Plus",
"!xdaf": "XDA-Developers Forum",
"!abbuc": "Abbuc e.V.",
"!lingvo": "Lingvo",
"!yify": "YIFY Torrents",
"!player": "Player FM",
"!hecf": "Hazme El Chingado Favor",
"!highrec": "HighRec",
"!allbibles": "Online Parallel Bible",
"!lgw": "libregamewiki",
"!bwc": "Belfry Web Comics",
"!hudku": "International Yellow Pages",
"!ebaysg": "ebay.com.sg",
"!mpgde": "Max Planck Society",
"!cean": "CEAN",
"!lgc": "legeekcafe.com",
"!blogtalkradio": "BlogTalkRadio",
"!lmdtfy": "LMDDGTFY.CO.CC",
"!crossword": "WordFun crossword solver",
"!gaccess": "Google Accessible",
"!gid": "Google ID",
"!gie": "Google IE",
"!tudou": "土豆网",
"!gim": "Google Images",
"!ebayes": "ebay ES",
"!boost": "Boost.org",
"!gik": "Google IL",
"!git": "Git",
"!nrcnext": "NRC Next",
"!ebde": "ebay.de",
"!newscomau": "News.com.au",
"!gir": "Dear Computer Google Image Ripper SFW",
"!gis": "Google Search by Image",
"!sciencedaily": "Sciencedaily",
"!exfm": "exfm",
"!blackbookmag": "BlackBook",
"!mineforum": "Minecraft Forums",
"!yandex": "Yandex",
"!optimot": "Optimot",
"!pkgs": "Linux Packages Search",
"!drugs": "Drugs",
"!zyrv": "Zyrv.com",
"!mamma": "Mamma",
"!stockphotos": "iStockPhoto",
"!lycos": "Lycos",
"!clojuredocs": "Clojure Docs",
"!foofind": "Foofind",
"!chicagotribune": "The Chicago Tribune",
"!roadandtrack": "Road & Track",
"!msh": "Jaen",
"!nzbmatrix": "NzbMatrix",
"!wowbattlenet": "BattleNet",
"!trulia": "Trulia",
"!okf": "otrkeyfinder",
"!msq": "Morningstar Quote",
"!gblast": "Gigablast ",
"!bigfish": "Big Fish Games",
"!eeggs": "Easter Eggs",
"!mspa": "MS Paint Adventures Wiki",
"!bay12": "Bay12Games",
"!sba": "SBA",
"!openradar": "OpenRadar",
"!pcworld": "Pcworld",
"!kotakuau": "KotakuAus",
"!43things": "43Things",
"!ixquickvideos": "Ixquick videos",
"!naturespic": "Nature's Pic Images",
"!xda": "XDA-Developers Forum",
"!exif": "Jeffrey's Exif Viewer",
"!animenewsnetwork": "Anime News Network",
"!stock": "Google Finance",
"!aukro": "Aukro",
"!spi": "Startpage Images",
"!pc": "ProCog",
"!hoogle": "Hoogle",
"!moviepilot": "Moviepilot DE",
"!youtube": "YouTube",
"!earth911": "Earth911.com",
"!spr": "SitePoint Reference",
"!lurk": "Lurkmoar",
"!ilyax": "ilyax iblox is my blox",
"!hadoop": "Search-Hadoop",
"!xep": "XMPP Extension Protocols",
"!animelyrics": "AnimeLyrics",
"!tv2": "TV2",
"!sslife": "Skyscraperlife",
"!subreddit": "Reddit Subreddits",
"!ytl": "YouTube - long",
"!kerodicas": "KeroDicas",
"!submarino": "Submarino",
"!soso": "Soso",
"!worldofspectrum": "World of Spectrum",
"!nullege": "Nullege",
"!emacs": "EmacsWiki",
"!013": "0l3.de",
"!filefacts": "Filefacts",
"!carmag": "CarMagazine.co.uk",
"!awesomecow": "Awesome Cow",
"!wink": "Wink",
"!next": "Next-Episode.net",
"!winc": "Wikimedia Incubator",
"!perldoc": "Perldoc.perl.org",
"!wattpad": "Wattpad",
"!3tailer": "3tailer",
"!rfaces": "alltheragefaces",
"!orthonet": "orthonet",
"!ci": "Codeigniter User Guide",
"!diigo": "Diigo",
"!luaforge": "LuaForge",
"!cf": "ColdFusion Docs",
"!upackages": "Ubuntu Packages",
"!cc": "CreativeCommons.org",
"!wnl": "Wikipedia NL",
"!wno": "Wikipedia NO",
"!wnn": "Wikipedia Nynorsk",
"!scala": "Scala ",
"!geoip": "GeoIpTool",
"!epl": "ebay.pl",
"!investopedia": "Investopedia",
"!vndb": "VNDB",
"!worldcat": "WorldCat.org",
"!shareware": "Download.com",
"!gridcalendar": "GriCal",
"!last.fm": "Last.fm",
"!moo": "mooTools Docs",
"!eph": "ebay.ph",
"!thalia": "Thalia",
"!mpny": "MenuPages",
"!onion": "The Onion",
"!ade": "Amazon.de",
"!w3schools": "W3Schools.com",
"!latex": "LaTeX Code",
"!avax": "AvaxHome",
"!merriamwebster": "Merriam-Webster",
"!epg": "epguides.com",
"!btmon": "BitTorrentMonster",
"!ftp": "Mamont",
"!nvd": "National Vulnerability Database",
"!hatbuzz": "hatbuzz",
"!wpl": "Wikipedia PL",
"!libris": "Libris",
"!cake2book": "Cakephp 2 Book",
"!googlemaps": "Google Maps",
"!pplware": "Pplware",
"!ling": "Ling.pl",
"!wykop": "Wykop",
"!lyrics": "Lyric Wiki",
"!jetwick": "Jetwick",
"!cinemagay": "CinemaGay",
"!d": "The Free Dictionary",
"!help": "DuckDuckGo Help",
"!leos": "LEO Dictionary Spanish",
"!leor": "LEO Dictionary Russian",
"!hack": "duckduckhack",
"!leoi": "LEO Dictionary Italian",
"!leof": "LEO Dictionary French",
"!whois": "DomainTools.com",
"!ted": "TED",
"!leoc": "LEO Dictionary Chinese",
"!wowpedia": "Wowpedia",
"!mcanime": "MCAnime.net Beta",
"!liquidation": "http://www.liquidation.com",
"!gde": "Google DE",
"!gdk": "Google DK",
"!bigstock": "Bigstock",
"!kindleuk": "Amazon UK Kindle",
"!launchpad": "LaunchPad",
"!gtranslate": "Google Translate",
"!prisjakt": "Prisjakt",
"!gdt": "Grand Dictionnaire Terminologique",
"!vim": "Vim.org",
"!ebayph": "ebay.ph",
"!wieowie": "Wieowie",
"!ebaypl": "ebay.pl",
"!mootools": "mooTools Docs",
"!here": "Here.net formerly Nokia Maps",
"!ebes": "ebay ES",
"!chefkoch": "Chefkoch",
"!nounproject": "NounProject",
"!food": "Food.com",
"!w": "Wikipedia",
"!gjp": "Google JP",
"!pxb": "Pixabay",
"!globeandmail": "The Globe And Mail",
"!tokyotosho": "Tokyotosho",
"!gittigidiyor": "GittiGidiyor",
"!gpat": "Google Patents",
"!acro": "Acronyms",
"!foursquare": "foursquare",
"!ohnorobot": "Oh No Robot",
"!scaruffi": "Scaruffi",
"!icecast": "Icecast Directory",
"!gsub": "Greek Subtitles Project",
"!glyde": "Glyde",
"!everyclick": "Every Click",
"!gsuk": "Google Shopping UK",
"!musicme": "MusicMe",
"!folha": "Folha de S. Paulo",
"!chrome": "Chrome Extensions",
"!gl": "google",
"!cowboylyrics": "Cowboy Lyrics",
"!bl": "DuckDuckGo Blinders",
"!clseattle": "Craigslist",
"!gpackages": "Gentoo Packages",
"!h18": "http://hangar18.com.ar",
"!giezhals": "Geizhals",
"!html2txt": "Html2Txt",
"!newsyc": "HNsearch",
"!cnrtl": "Centre National des Ressources Textuelles et Lexicales",
"!fren": "Wordreference",
"!worm": "Wormbase",
"!lspace": "L-Space",
"!msmalware": "Microsoft malware encyclopedia",
"!vogue": "Vogue.com",
"!azfonts": "AZFonts",
"!ebe": "ebay.be",
"!web2py": "Web2Py",
"!dhlgm": "DHL Global Mail",
"!boardgamegeek": "BoardGameGeek",
"!eventful": "Eventful",
"!epinions": "Epinions",
"!googletr": "Google Turkey",
"!wotif": "Wotif",
"!freecode": "FreshMeat.net",
"!yeahway": "Yeah Way!",
"!ips": "IPS",
"!spstory": "cultura",
"!reciva": "Reciva",
"!ebayuk": "ebay.co.uk",
"!howjsay": "HowJSay",
"!wikthu": "Wiktionary hu",
"!jinni": "Jinni",
"!hotukdeals": "Hotukdeals",
"!tld": "Gandi.net",
"!dgg": "GG - dejure.org",
"!leo": "LEO Dictionary",
"!gog": "Good old games",
"!god": "Guts of Darkness",
"!ebbe": "ebay.be",
"!espacoliberdade": "Espaço Liberdade",
"!bce": "dict.bing.com.cn",
"!lewrockwell": "LewRockwell.com",
"!gov": "US Government Search",
"!epfl": "EPFL",
"!bcp": "Best Current Practices",
"!citeseer": "CiteSeer",
"!duo": "..duo dotdotduo",
"!vggde": "viagogo.de",
"!duc": "Duke University Catalog",
"!ebca": "ebay.ca",
"!ikso": "Ikso Kantaro",
"!startpage": "StartPage",
"!vimscripts": "Vim Scripts",
"!xdcc": "XDCCFinder",
"!gbugs": "Gentoo's Bugzilla",
"!pollin": "Pollin Electronic",
"!trfde": "transfermarkt.de",
"!dman": "Debian Hypertext Man Pages",
"!oopon": "Oopon",
"!timestamp": "DuckDuckGo",
"!gamefaqs": "GameFAQs.com",
"!bookmine": "Gutenberg text content",
"!nypost": "NYPost",
"!indigo": "Indigo Books",
"!forbes": "Forbes",
"!compass": "Compass",
"!dribbble": "dribbble",
"!miniclip": "Miniclip",
"!bsocial": "Bing Social",
"!lololyrics": "Lololyrics",
"!gplay": "Google Play Store",
"!zapaday": "Zapaday",
"!apolloduck": "Apollo Duck",
"!idealode": "Idealo",
"!scipy": "SciPy",
"!munju": "Munju",
"!evirttit": "Estante Virtual by title",
"!isup": "Is Up?",
"!catalinstefan": "http://android.catalinstefan.com/",
"!shoutcast": "Shoutcast Streaming Internet Radio",
"!geocaching": "Geocaching.com",
"!bbapps": "BlackBerry App World",
"!meteociel": "Meteociel",
"!canistream": "Can I Stream It?",
"!pagine": "Pagine Gialle",
"!wadoku": "WaDoKu",
"!spinoza": "Spinoza",
"!gmob": "Google mobile site viewer",
"!pep": "Python PEP",
"!ebayin": "ebay.in",
"!yellownz": "Yellow NZ",
"!shopzilla": "Shopzilla",
"!ebayie": "ebay.ie",
"!coldfusion": "ColdFusion Docs",
"!gtabs": "UltimateGuitar.com",
"!dtc": "DansTonChat",
"!java6": "Java6 Docs",
"!java7": "Java7 Docs",
"!kippt": "Kippt",
"!taobao": "Taobao",
"!stackoverflow": "StackOverflow",
"!anandtech": "Anandtech",
"!mendeley": "Mendeley",
"!python3": "Python3 Docs",
"!jed": "Joomla ",
"!tradera": "Tradera",
"!texdoc": "Texdoc",
"!mv": "Myvideo",
"!mw": "Merriam-Webster",
"!tent": "Skate",
"!ms": "Microsoft",
"!mp": "MenuPages",
"!mq": "MapQuest",
"!angelhalowiki": "Angelhalo Wiki",
"!bbcw": "BBC Weather",
"!tanzil": "Tanzil",
"!mb": "MusicBrainz",
"!rosettacode": "RosettaCode.org",
"!ma": "Memory Alpha",
"!duckco": "The DuckDuckGo Community",
"!ml": "Mercado Livre",
"!mh": "The Monster Hunter Wiki",
"!pbone": "RPM.pbone.net",
"!wln": "WestlawNext",
"!pronounce": "Dictionary.com",
"!bestsimilarsites": "bestsimilarsites",
"!cpandeps": "CPAN Dependencies Checker",
"!huffingtonpost": "Huffington Post",
"!apache": "Apache.org",
"!mib": "Mibbit",
"!gworkshop": "Games Workshop",
"!kickstarter": "Kickstarter",
"!rpggeek": "RPGGeek",
"!hi5": "Hi5",
"!gwiki": "Gentoo Wiki",
"!groove": "Grooveshark",
"!atom": "Atomurl ",
"!biography": "Biography.com",
"!uzg": "Uitzending Gemist",
"!fhtml": "Fireburst HTML",
"!jumpr": "Jump",
"!lw": "LessWrong",
"!adlibrisno": "AdLibris NO",
"!komputeko": "Komputeko",
"!csfd": "ČSFD",
"!symbolhound": "Symbol Hound",
"!cook": "Cooks.com",
"!upcoming": "Upcoming",
"!appledev": "Apple Developer",
"!rateyourmusic": "Rate Your Music",
"!jarvanacontent": "Jarvana",
"!clojure": "Clojure Docs",
"!mapp": "ACME Mapper 2.0",
"!maps": "Google Maps",
"!discovery": "Discovery.com",
"!ibm": "IBM",
"!enfr": "Wordreference",
"!man7": "The Linux man-pages project",
"!mamont": "Mamont",
"!mathoverflow": "MathOverflow",
"!dram": "Encyclopedia Dramatica",
"!avclub": "A.V. Club",
"!d20": "d20 SRD",
"!cms": "Chicago Manual of Style",
"!bulbapedia": "Bulbapedia",
"!subsearch": "Subsearch",
"!majorsbooks": "Majors Books",
"!creativecow": "Creative Cow Forums",
"!ripestat": "RIPEstat",
"!koreus": "Koreus",
"!pixelp": "PixelProspector",
"!li": "LinkedIn",
"!nyt": "New York Times",
"!myopera": "My Opera",
"!gnfr": "Google News French",
"!acm": "ACM Guide to Computing Literature",
"!gallica.bnf": "Gallica BNF",
"!willh": "The Will Will Web",
"!slwiki": "Second Life Wiki",
"!askapatient": "AskaPatient.com",
"!sierrasoftworks": "Sierra Softworks",
"!gamespot": "GameSpot",
"!dhnet": "La Dernière Heure",
"!watchtower": "Watchtower Online Library",
"!yjp": "Yahoo! Japan",
"!skyrock": "SkyRock",
"!dell": "Dell",
"!tabs": "UltimateGuitar.com",
"!dlpo": "Dicionário Priberam da Língua Portuguesa",
"!skroutz": "skroutz",
"!flickriver": "Flickriver",
"!bbc": "BBC.co.uk",
"!ghk": "Google HK",
"!flickpeek": "FlickPeek",
"!iarchive": "archive.org",
"!ebayde": "ebay.de",
"!sonicretro": "Sonic Retro / Sega Retro",
"!openbook": "OpenBook",
"!qobuz": "Qobuz",
"!ebch": "ebay.ch",
"!bbt": "BakaBT",
"!sulit": "Sulit",
"!psulib": "Penn State Library",
"!rfc": "tools.ietf.org",
"!gcsebitesize": "BBC GCSE Bitesize",
"!onelook": "OneLook",
"!nyaa": "Nyaa Torrents",
"!swdb": "The Spaghetti Western Database",
"!fxr": "FreeBSD Cross Reference",
"!arcgis": "ArcGIS Online",
"!neto": "Netophonix",
"!gus": "Google US",
"!cfp": "WikiCFP",
"!grave": "Find A Grave",
"!dbgb": "BGB - dejure.org",
"!rps": "Rock, Paper, Shotgun",
"!ordfr": "Ord.se FR",
"!amazonde": "Amazon",
"!mvnrepository": "MVNRepository",
"!nestuk": "Nestoria UK",
"!reuq": "Stock Quotes & Company News | Reuters.com",
"!dhgate": "DHGate",
"!accuweather": "AccuWeather",
"!vgguk": "viagogo.co.uk",
"!gocomics": "GoComics",
"!bimages": "Bing Images",
"!rpm": "RPMfind.net",
"!legal": "Google Scholar - Legal Opinion Section",
"!cnet": "CNET",
"!workcircle": "Workcircle",
"!khanacademy": "Khan academy",
"!search.refertus": "search.refertus",
"!startrek": "Memory Alpha",
"!lovefilm": "lovefilm.com",
"!priberam": "Priberam",
"!deezer": "Deezer",
"!bloomberg": "Bloomberg",
"!wikimapia": "Wikimapia",
"!qrobe": "qrobe.it",
"!douban": "Douban",
"!suntimes": "SunTimes",
"!ccr": "Chakra Community Repository",
"!yjisho": "Yahoo Jisho",
"!indeed": "Indeed",
"!amglobal": "Amazon GLOBAL",
"!yimages": "Yahoo! Images",
"!techdirt": "TechDirt",
"!mathse": "Mathematics - Stack Exchange",
"!imb": "imbiomed",
"!domainr": "Domainr",
"!img": "Google Images",
"!4chan": "FoOlz [4chan] Archives",
"!kkbruce": "KingKong Bruce記事",
"!derpibooru": "Derpibooru",
"!writen": "Wordreference it->en",
"!vogueuk": "Vogue.co.uk",
"!proc": "Processing.org",
"!bible": "BibleGateway",
"!liquipedia": "Liquipedia2",
"!pdf": "PDFs",
"!gplus": "Google Plus",
"!pdb": "RSCB Protein Data Bank",
"!linkedin": "LinkedIn",
"!acronyms": "Acronyms",
"!dolu": "Dolu Sozluk",
"!eventid": "EventID",
"!mp3skull": "MP3Skull",
"!bitsnoop": "BitSnoop",
"!hypem": "The Hype Machine",
"!sanakirja": "Sanakirja.org",
"!blogs": "Google Blogs",
"!4shared": "4shared",
"!whiskyde": "whisky DE",
"!amit": "Amazon.it",
"!mercurynews": "Mercury News",
"!miso": "http://gomiso.com/",
"!mpph": "MenuPages",
"!wallbase": "wallbase",
"!chef": "Opscode Chef Wiki",
"!makeuseof": "MakeUseOf",
"!scritique": "SensCritique",
"!veroot": "Veroot",
"!lynxview": "Lynx Viewer Yellowpipe.com",
"!charity": "Charity Navigator",
"!wmc": "https://commons.wikimedia.org/",
"!cnn": "CNN",
"!asoiaf": "A Wiki of Ice and Fire",
"!caniuse": "Can I use...",
"!synonym": "Thesaurus.com",
"!similar": "SimilarSites",
"!pinboard": "Pinboard",
"!redis": "Redis Command Reference",
"!foodsubs": "Cook's Thesaurus",
"!hrwiki": "Homestar Runner Wiki",
"!howthingswork": "How Things Work",
"!ubottu": "Ubottu.com",
"!oid": "Search OID registry",
"!azlyrics": "A-Z Lyrics",
"!dailymed": "DailyMed",
"!michaelis": "Michaelis - Portugues - English",
"!mitvid": "MIT Video",
"!api": "DuckDuckGo API",
"!bigwords": "BigWords.com",
"!ggroups": "Google Groups",
"!nvidia": "Nvidia",
"!mmnt": "Mamont",
"!answers": "Answers.com",
"!mash": "mashable",
"!uesp": "The Unoffical Elder Scrolls pages",
"!nfb": "National Film Board of Canada",
"!mpsf": "MenuPages",
"!dwds": "DWDS",
"!jeopardy": "J! Archive",
"!archlinux": "Archlinux Forums",
"!shooter": "Shooter",
"!fdo": "freedesktop.org",
"!123p": "123people",
"!zappos": "Zappos",
"!gvpl": "Greater Victoria Public Library",
"!fda": "FDA",
"!wowarmory": "WOW Armory",
"!musopen": "Musopen",
"!qrdecode": "ZXing QR decode",
"!dbasx": "DBA StackExchange",
"!ann": "Anime News Network",
"!kcommunity": "KDE Community Wiki",
"!dfm": "definefor.me",
"!htf": "HowtoForge",
"!htg": "How-To Geek shortcut",
"!dfw": "DwarfFortress Wiki",
"!debianforums": "Debian Forums",
"!gw2wiki": "Guild Wars 2 Wiki",
"!politiken": "Politiken.dk",
"!osalt": "osalt.com",
"!principien": "Principien",
"!texture": "Google Images",
"!androidpit": "AndroidPit",
"!imgur": "Imgur",
"!focalprice": "Focalprice",
"!blender": "Blender",
"!yummly": "Yummly",
"!lifehacker": "Lifehacker",
"!allabolag": "Alla Bolag",
"!goodreads": "GoodReads",
"!senderbase": "SenderBase",
"!utf8": "Unicode Character Search",
"!yopmail": "YopMail",
"!agdl": "The Annotated Grateful Dead Lyrics",
"!opensusesoftware": "OpenSUSE Software Search",
"!fefe": "Fefes Blog",
"!sloanes": "Integer Sequences",
"!govtrack": "govtrack.us",
"!googleuk": "Google UK",
"!trakt": "Trakt",
"!palgn": "PALGN",
"!wakoopa": "Wakoopa",
"!sdict": "SpanishDict",
"!fireburstvideos": "Videos - Fireburst",
"!yanswers": "Yahoo! Answers",
"!trello": "Trello",
"!lds": "The Church of Jesus Christ of Latter-day Saints",
"!cellartracker": "CellarTracker",
"!ebuk": "ebay.co.uk",
"!wiktit": "Wikizionario, il dizionario libero",
"!thomas": "Thomas.loc.gov",
"!login": "BugMeNot",
"!dictcc": "dict.cc",
"!wit": "Wikipedia IT",
"!wiq": "WordIQ",
"!cbc": "CBC News",
"!imbd": "IMBD",
"!techrights": "TechRights",
"!songsterr": "Songsterr",
"!trovaprezzi": "Trovaprezzi.it",
"!extratorrent": "ExtraTorrent",
"!amazonmp3": "Amazon MP3 Store",
"!911s": "911 Tabs Songs",
"!binsearch": "Binsearch",
"!posters": "All Posters",
"!rl": "Rapidlibrary",
"!freebsd": "FreeBSD",
"!baidu": "Baidu",
"!rg": "Rap Genius",
"!collegeconfidential": "CollegeConfidential Forums",
"!tubafm": "Tuba FM",
"!rb": "Redbubble",
"!nintendolife": "Nintendo Life",
"!domainsbot": "domainsbot.com",
"!ynews": "Yahoo! News",
"!whosampled": "who sampled",
"!rt": "Rotten Tomatoes",
"!plot": "FooPlot",
"!okgeek": "Okay Geek",
"!iusethis": "iusethis.com",
"!dhl": "DHL",
"!numerama": "Numerama",
"!sproducts": "Sproducts",
"!pylons": "Pylons Docs",
"!wordpress": "WordPress.org",
"!wikisum": "Wiki Summaries",
"!teamliquid": "Teamliquid",
"!debbug": "Debian Bugs",
"!pcmag": "Pcmag",
"!tureng": "Tureng Dictionary",
"!nbd": "Norwegian Bokmål Dictionary",
"!nba": "NBA.com",
"!dll": "DLL-files.com",
"!sloganmaker": "SloganMaker",
"!chip": "CHIP Online",
"!sphp": "SearchForPHP",
"!shopathome": "ShopAtHome.com",
"!perlmonks": "PerlMonks.org",
"!ajc": "AJC.com",
"!ajb": "Acha Jogo Barato",
"!teketen": "Teketen",
"!foodnetwork": "Food Network",
"!git-scm": "Git",
"!elen": "Google translate el-en",
"!bakabt": "BakaBT",
"!subs": "addic7ed",
"!compfight": "Compfight",
"!urlquery": "Urlquery",
"!qype": "Qype",
"!crunchyroll": "CrunchyRoll",
"!scoop": "Scoop.it",
"!regex": "Regular Expression Library",
"!tatoeba": "Tatoeba",
"!wikivs": "WikiVS",
"!lotrow": "LOTRO-Wiki",
"!cashback": "Sunshine Rewards",
"!vemo": "Vemo.it forniture alberghiere",
"!racket": "Racket",
"!capost": "Canada Post",
"!geocache": "Geocaching.com",
"!mcpan": "MetaCPAN",
"!zavvi": "Zavvi",
"!mitocw": "MIT OpenCourseWare",
"!son2teuf": "Son2Teuf",
"!searchyc": "HNsearch",
"!clipstijl": "Clipstijl",
"!laser": "LaserShip",
"!hgnc": "HUGO Gene Nomenclature Committee HGNC",
"!musiciansfriend": "Musician's Friend",
"!macupdate": "MacUpdate",
"!chords": "UltimateGuitar.com",
"!patternry": "Patternry",
"!gbooks": "Google Books",
"!iwant": "iWantMyName",
"!yatr": "Yandex Turkiye",
"!btn": "Broadcasthe.net",
"!activeden": "ActiveDen",
"!ebit": "ebay.it",
"!gfc": "greatfirewallofchina.org",
"!imagenet": "ImageNet",
"!newsday": "NewsDay.com",
"!gfi": "Google FI",
"!findlaw": "FindLaw",
"!nciku": "Nciku.com",
"!rhyme": "RhymeZone",
"!ebie": "ebay.ie",
"!gfr": "Google FR",
"!ebin": "ebay.in",
"!ii": "Ixquick Images",
"!huffpost": "Huffington Post",
"!nzbs": "nzbs.org",
"!bing": "Bing",
"!susebug": "openSUSE Bugzilla",
"!musicbrainz": "MusicBrainz",
"!date": "Date Sort",
"!im": "Google Images",
"!twinpedia": "Twinpedia",
"!passmark": "passmark.com",
"!magma": "MagmaWiki",
"!skepticsbible": "Skeptic's Bible",
"!sunpatch": "WeSunSolve",
"!lyricsmania": "Lyrics Mania",
"!macworld": "Macworld",
"!mskb": "Microsoft Knowledge Base",
"!osnews": "OSNews",
"!scope.dk": "scope.dk",
"!yegg": "Gabriel Weinberg's Blog",
"!mvn": "Maven Central",
"!facebook": "Facebook",
"!wtpt": "Wiktionary portuguese",
"!dipadova": "Perpetual DiPadova",
"!mhwiki": "The Monster Hunter Wiki",
"!answer": "Yahoo! Answers",
"!mathworks": "Matlab File Exchange",
"!captaincrawl": "CaptainCrawl",
"!indiamart": "IndiaMART",
"!wordnet": "WordNet",
"!seb": "Sebsauvage",
"!sec": "SEC.gov",
"!tsa": "TechSupportAlert.com",
"!photodune": "PhotoDune",
"!appcel": "Appcelerator API Docs",
"!zbmath": "Zentralblatt MATH Database",
"!esg": "ebay.com.sg",
"!cricinfo": "Cricinfo",
"!sep": "Stanford Encyclopedia of Philosophy",
"!seq": "The Online Encyclopedia Of Integer Sequences",
"!istockphoto": "iStockPhoto",
"!tsr": "The Student Room",
"!xkcdn": "XKCD Number",
"!ihep": "InspireHEP",
"!grepcode": "GrepCode.com",
"!alto": "AlternativeTo",
"!psimplified": "Programming Simplified",
"!lpbug": "Launchpad Bugs",
"!kapi": "KDE API Reference",
"!newsnow": "News Now",
"!crunchbase": "CrunchBase",
"!gmap": "Google Maps",
"!dailymotion": "DailyMotion",
"!themag": "The Mag",
"!prpm": "Pusat Rujukan Persuratan Melayu",
"!codeplex": "Codeplex",
"!kbugs": "KDE Bugs",
"!7digital": "7digital",
"!dpdt": "DPD Online Tracking",
"!pubmed": "PubMed",
"!mednar": "Mednar",
"!snopes": "Snopes.com",
"!uglifyjs": "UglifyJS",
"!eqd": "Equestria Daily",
"!tmd": "torrentsmd.com",
"!wrenit": "Wordreference en->it",
"!dictionary": "Dictionary.com",
"!osub": "OpenSubtitles.org",
"!glu": "Google Luxembourg",
"!scienceblogs": "Science Blogs",
"!usa": "Search.USA.gov",
"!usc": "Ubuntu Software Center",
"!reddit": "reddit",
"!pictures": "Google Images",
"!songmeanings": "SongMeanings",
"!wikt": "Wiktionary",
"!kasperskymal": "Securelist Malware info",
"!news": "Google News",
"!slackbuilds": "Slackbuilds",
"!torwiki": "Torproject Wiki",
"!newegg": "Newegg",
"!etree": "Live Music Archive",
"!mathematica": "Mathematica Documentation Center",
"!nlab": "nLab",
"!newsarchive": "Google News Archive",
"!rails": "Rails Docs",
"!gettyimages": "Getty Images",
"!wiki": "Wikipedia",
"!serebii": "Serebii",
"!tvtropes": "TV Tropes",
"!cheapassgamer": "Cheap Ass Gamer",
"!drupalcontrib": "DrupalContrib",
"!rutube": "RuTube",
"!inspire": "Inspire HEP ",
"!goosh": "Goosh",
"!microcenter": "Micro Center",
"!doi": "DOI.org",
"!scrape": "Scrape Torrent",
"!tiho": "TiHo-OPAC",
"!myallsearch": "MyAllSearch",
"!gramota": "gramota.ru",
"!brightstorm": "Brightstorm",
"!tcrf": "The Cutting Room Floor",
"!lcc": "Let’s CC",
"!nsr": "NationStates Region",
"!ideas": "RePEc IDEAS",
"!ilsole24ore": "Il Sole 24 Ore",
"!/.j": "Slashdot Japan",
"!verbomatic": "Verb-o-Matic",
"!nsn": "NationStates Nation",
"!forkd": "Forkd",
"!ros": "ROS",
"!ikeanl": "IKEA Nederland",
"!techcrunch": "Techcrunch",
"!crate": "Crate.io",
"!movies": "movies.io",
"!cwebstore": "Chrome Webstore",
"!staticice": "Static Ice",
"!fark": "Fark.com",
"!cake": "CakePHP API",
"!gtrends": "Google Trends",
"!blackbook": "BlackBook",
"!dblp": "DBLP",
"!gtnl": "Google Translate to Dutch",
"!ordbok": "Ordbok.no",
"!parabola": "Parabola GNU/Linux",
"!jarvanaclass": "www.jarvana.com",
"!rapidonline": "Rapid Online",
"!plan3t": "Plan3t.info",
"!beatport": "Beatport",
"!naver": "Naver",
"!oreilly": "O'Reilly",
"!zanran": "Zanran",
"!fwiki": "Fortran Wiki",
"!onimoto": "Onimoto.Com",
"!purolator": "Purolator shipment tracking",
"!br": "Baseball-Reference",
"!apackages": "ArchLinux Packages",
"!batlyrics": "Batlyrics",
"!ebates": "Ebates",
"!bkr": "Basketball Reference",
"!qssl": "Qualys SSL Labs",
"!vukajlija": "Vukajlija",
"!/.": "Slashdot",
"!dict.cc": "dict.cc",
"!traceroute": "DomainTools.com",
"!recipe": "Punchfork",
"!ggr": "Google GR",
"!gin": "Google IN",
"!rockethub": "RocketHub",
"!mercadolivre": "Mercado Livre",
"!stex": "Stack Exchange",
"!lastfm": "Last.fm",
"!piratenwiki": "Wiki der Piratenpartei Deutschland",
"!startribune": "The Star Tribune",
"!gua": "Google UA",
"!gaen": "Google Translate",
"!iqdb": "iqdb",
"!eat": "ebay.at",
"!eau": "ebay.com.au",
"!guk": "Google UK",
"!inci": "incisozluk",
"!wiggle": "www.wiggle.co.uk",
"!yfinance": "Yahoo Finance",
"!wimp": "wimp.com",
"!verb": "Verb-o-Matic",
"!dsv": "DSV SU",
"!ebayau": "ebay.com.au",
"!ebayat": "ebay.at",
"!appshopper": "Appshopper",
"!rym": "Rate Your Music",
"!android": "Android Developers",
"!indiegogo": "IndieGoGo",
"!slang": "The Online Slang Dictionary",
"!stackexchange": "stackexchange.com",
"!zumi": "Mapy Zumi",
"!wos": "World of Spectrum",
"!dawanda": "DaWanda",
"!vtkcd": "Visualization Toolkit VTK class documentation devel",
"!anisearch": "aniSearch.de",
"!imagery": "http://elzr.com/imagery/",
"!djpackages": "Django Packages",
"!radiotimes": "Radio Times",
"!metalstorm": "Metal Storm",
"!trl": "The Ring Lord",
"!trademarks": "Trademarkia",
"!orkut": "Orkut",
"!recordclick": "www.recordclick.com",
"!sfe": "Sparkfun Electronics",
"!trz": "Torrentz",
"!sfs": "stopforumspam.com",
"!chakrawiki": "The Chakra Project Wiki",
"!newark": "Newark",
"!rei": "REI",
"!gb": "Google Books",
"!gdrive": "Google Drive",
"!ge": "google.encypted.com",
"!gf": "Google Finance",
"!gg": "Google Groups",
"!gh": "GitHub.com",
"!gi": "Google Images",
"!gj": "Google Japan",
"!anyr": "DuckDuckGo",
"!swagbucks": "Swagbucks",
"!whitewater": "Whitewater Search Engine",
"!gn": "Google News",
"!gp": "Google Photos",
"!gr": "Google Reader",
"!gs": "Google Shopping",
"!gt": "Google Translate",
"!gv": "Google Video",
"!gd": "Google Docs",
"!thinkgeek": "ThinkGeek",
"!tmdb": "TheMovieDB.org",
"!siteduzero": "Site du Zéro",
"!sf2": "Symfony Framework",
"!gfaqs": "GameFAQS.com",
"!coupons": "DealTaker",
"!mogr": "moviegram",
"!wikitravel": "Wikitravel",
"!instructables": "Instructables",
"!matlab": "Matlab documentation",
"!ups": "UPS",
"!imfdb": "Internet Movie Firearms Database",
"!updown": "DownForEveryone?",
"!drae": "Diccionario de la lengua española",
"!emuparadise": "Emuparadise",
"!bartlets": "Bartelby",
"!exalead": "Exalead",
"!nsprings": "North Springs Community",
"!wdr": "webDevRefinery",
"!tmbw": "This MIght Be a Wiki",
"!gmane": "Gmane",
"!wdt": "Wikidata",
"!map": "Google Maps",
"!fireburst": "Fireburst Web Search",
"!freepascal": "Freepascal",
"!rakuten": "Rakuten",
"!wda": "Wikipedia DA",
"!winfu": "WinFuture",
"!man": "man.cx",
"!wde": "Wikipedia DE",
"!mac": "MAC Address",
"!iplayer": "iPlayer",
"!taringa": "Taringa!",
"!p4k": "Pitchfork Media",
"!pollstar": "Pollstar",
"!princeton": "Princeton",
"!slashdot": "Slashdot",
"!jetslide": "Jetslide News Reader",
"!wja": "Wikipedia JA",
"!stationsweb": "Stationsweb",
"!hs": "Honyaku Star",
"!java4": "Java4 Docs",
"!albumartdvd": "Albumart DVD",
"!snake": "Just Snake",
"!idioms": "The Free Dictionary",
"!qr": "QR Code Search",
"!nfl": "NFL.com",
"!code": "Search Code",
"!topsy": "Topsy",
"!adlibrisdk": "AdLibris DK",
"!qt": "Qt Reference Documentation",
"!java5": "Java5 Docs",
"!compete": "Compete",
"!gentoowiki": "Gentoo Wiki",
"!xgau": "Robert Christgau",
"!powells": "Powells",
"!weather": "WeatherBug",
"!careerbuilder": "CareerBuilder",
"!discogs": "Discogs",
"!gizmodo": "Gizmodo",
"!matlabfx": "Matlab File Exchange",
"!tomshardware": "Tom's Hardware",
"!historious": "Historious",
"!lolpro": "LoL Pro",
"!define": "The Free Dictionary",
"!amjp": "Amazon.co.jp",
"!hnsearch": "HNsearch",
"!gopher": "Floodgap Gopher",
"!qwiki": "Qwiki",
"!hoepli": "Hoepli",
"!camel": "CamelCamelCamel",
"!nestde": "Nestoria Deutschland",
"!mangafox": "Mangafox",
"!g24": "Google Past 24h",
"!appvv": "AppVV",
"!smention": "Social Mention",
"!thingiverse": "Thingiverse",
"!csharp": "C# MSDN",
"!crx": "Chrome Extension Repository",
"!searchworks": "SearchWorks SULAIR",
"!devonforum": "DEVONtechnologies Forum",
"!randt": "Road & Track",
"!wowwiki": "WoWWiki",
"!oldcpan": "CPAN",
"!habra": "habrahabr.ru",
"!gphotos": "Google Photos",
"!leboncoin": "Leboncoin.fr",
"!msmvps": "MSMVPs.com",
"!gm": "Google Maps",
"!newsfr": "Google News France",
"!bv": "Bing Videos",
"!ww": "Whitewater Search Engine",
"!bs": "Bing Shopping",
"!sbo": "SlackBuilds.org",
"!bn": "Barnes & Noble",
"!bm": "bing maps",
"!pecl": "PECL :: The PHP Extension Community Library",
"!bi": "Bing Images",
"!orthodoxwiki": "OrthodoxWiki",
"!ebert": "RogerEbert.com",
"!bd": "baidu",
"!bc": "DuckDuckGo",
"!bb": "Bitbucket",
"!ba": "Beer Advocate",
"!targeo": "Targeo",
"!lyricwiki": "LyricWiki",
"!ezydvd": "EzyDVD",
"!wot": "Web of Trust",
"!quizlet": "Quizlet",
"!liquidpedia": "Liquidpedia",
"!wog": "World of Gnome",
"!yandexen": "Yandex english",
"!target": "Target",
"!mlb": "The official site of Major League Baseball",
"!nethack": "NetHack Wiki",
"!gcpan": "CPAN -> GREP",
"!thefullwiki": "The Full Wiki",
"!musikm": "Musik Meisinger Burghausen",
"!wa": "WolframAlpha",
"!tweet": "Tweet",
"!newgrounds": "Newgrounds",
"!fishpond": "Fishpond",
"!summitpost": "SummitPost",
"!debian": "Debian",
"!cald": "Cambridge Advanced Learner's Dictionary",
"!docs": "Scribd.com",
"!websitedown": "Website Down",
"!twitch": "TwitchTV",
"!play": "Play.com",
"!faucet": "Faucet.com",
"!shopping": "Google Shopping",
"!what": "What",
"!firefox": "Firefox Add-ons",
"!giga": "GIGABLAST",
"!molw": "WebQC.org",
"!bang": "DuckDuckGo",
"!gch": "Google CH",
"!sanalpazar": "Sanal Pazar",
"!package": "PackageMapping",
"!abcnews": "ABCNews",
"!zapiks": "Zapiks",
"!safe": "DDG Safesearch On",
"!nds": "Nachdenkseiten",
"!sparknotes": "SparkNotes",
"!alc": "Eijiro on the web",
"!ali": "Aliexpress",
"!fbr": "Pro Basketball Reference",
"!xbox": "Xbox",
"!tumblr": "Tumblr",
"!fatwallet": "FatWallet",
"!meneame": "Menéame",
"!comicvine": "Comic Vine",
"!winehq": "WineHQ",
"!baseballreference": "Baseball-Reference",
"!boxoh": "Boxoh",
"!criticker": "Criticker",
"!gsmarena": "GSMArena",
"!govimages": "US Government Image Search",
"!gee": "Google EE",
"!ncix": "NCIX.com",
"!glatest": "Google Latest",
"!freenode": "Freenode",
"!datasheet": "AllDataSheet",
"!buy": "Amazon.com",
"!omim": "http://www.ncbi.nlm.nih.gov/omim",
"!ges": "Google Spain",
"!bibletools": "Bible Study Tools",
"!kapaza": "Kapaza",
"!behindsurname": "http://surnames.behindthename.com/",
"!joystiq": "Joystiq",
"!dlang": "D Programming Language",
"!marmiton": "Marmiton",
"!freshmeat": "FreshMeat.net",
"!tex": "TeX Stack Exchange",
"!fantasy": "fantasy.fr",
"!bgp": "Hurricane Electrics Internet Services",
"!vsg": "Value Stock Guide",
"!ebfr": "ebay.fr",
"!bgg": "BoardGameGeek",
"!webstats": "webstatsdomain",
"!jorginho": "[email protected]",
"!ebaych": "ebay.ch",
"!gday": "Google Past Day",
"!evri": "Evri",
"!gkr": "Google KR",
"!serch": "de.serchilo.net",
"!ebayca": "ebay.ca",
"!weboftrust": "Web of Trust",
"!parking": "ParkWhiz",
"!pcgw": "PCGamingWiki",
"!ringtones": "Brinked",
"!versiontracker": "VersionTracker.com",
"!truveo": "Truveo",
"!procog": "ProCog",
"!startpageimages": "Startpage images",
"!weatherspark": "Weather Spark",
"!sfpl": "San Francisco Public Library",
"!openlibrary": "Open Library",
"!data": "Infochimps",
"!duden": "Duden",
"!rut": "rutracker.org",
"!newsmax": "NewsMax.com",
"!esseffr": "ESSEF fr",
"!infospace": "Infospace",
"!overstock": "Overstock",
"!dpts": "Debian Packages",
"!rtnews": "RT",
"!xanga": "Xanga",
"!mefi": "MetaFilter",
"!salixforums": "SalixOS Forums",
"!ech": "ebay.ch",
"!ticketmaster": "Ticket Master",
"!openstreetmap": "OpenStreetMap",
"!eca": "ebay.ca",
"!cercavino": "CercaVino",
"!quotes": "WikiQuote",
"!mpl": "matplotlib",
"!sampled": "whosampled",
"!caranddriver": "Car and Driver",
"!distrowatch": "DistroWatch",
"!metacafe": "Metacafe",
"!metaso": "Meta Stack Overflow",
"!macports": "MacPorts",
"!arstechnica": "ArsTechnica",
"!pixiv": "Pixiv",
"!dilbert": "Dilbert",
"!whatis": "WhatIs.com",
"!beer": "BeerAdvocate",
"!exploitdb": "Exploit DB",
"!scp": "SCP Foundation",
"!seznam": "Seznam",
"!pastebin": "pastebin",
"!rinf": "RINF Alternative news",
"!ktb": "KDE TechBase",
"!telegraph": "Telegraph",
"!seatgeek": "SeatGeek",
"!newffr": "NewFFR",
"!gpsies": "GPSies.de",
"!soundcloud": "SoundCloud",
"!mediadico": "Mediadico",
"!csdb": "The C64 Scene Database",
"!enlt": "Google Translate English to Lithuanian",
"!chakraforum": "The Chakra Project",
"!hatesloganmaker": "Sloganmaker /hate",
"!meteofr": "Météo-France",
"!wzh": "Wikipedia ZH",
"!quixey": "Quixey",
"!123people": "123people",
"!biblsrv": "Bibleserver",
"!ap": "AP",
"!at": "Addictive Tips",
"!au": "African Unioun",
"!css": "CSS",
"!ah": "Así hablamos",
"!allocine": "Allocine.com",
"!csw": "Chicken Scheme Wiki",
"!am": "Amazon.com",
"!erlang": "Erlang Docs",
"!chakrapkg": "The Chakra Project Packages",
"!whu": "Wikipedia Hungary",
"!themeforest": "ThemeForest",
"!gamebanana": "GameBanana",
"!rootsarchives": "Roots Archives",
"!fileinfo": "File info",
"!canoo": "canoonet",
"!autocar": "Autocar.co.uk",
"!who": "who.is",
"!about": "About.com",
"!car": "CarMagazine.co.uk",
"!whi": "Wikipedia HI",
"!sqlite": "SQLite",
"!sm": "Smashing Magazine",
"!webtender": "Webtender",
"!so": "StackOverflow",
"!sh": "SimplyHired",
"!si": "SI.com",
"!sk": "songkick.com",
"!sd": "Slickdeals",
"!sf": "ServerFault",
"!gamepro": "GamePro",
"!studydroid": "StudyDroid",
"!sc": "Soundcloud",
"!ccst": "CSS Tricks",
"!graphemica": "Graphemica",
"!sx": "stackexchange.com",
"!liveleak": "http://www.liveleak.com/",
"!su": "SuperUser",
"!sv": "La Simpla Vortaro ",
"!sw": "Simple English Wikipedia",
"!sp": "StartPage",
"!mpla": "MenuPages",
"!sr": "reddit",
"!ss": "Subscene",
"!obb": "oremus Bible Browser",
"!traffic": "Google Traffic",
"!arturogoga": "Arturogoga",
"!burnbit": "Burnbit",
"!gnomebugs": "GNOME Bugzilla",
"!w3c": "W3C",
"!clipart": "Microsoft Clipart",
"!flickrc": "Flickr comm/deriv",
"!tigerdirect": "TigerDirect",
"!homedepot": "Home Depot",
"!mdbg": "MDBG Chinese-English dictionary",
"!lparchive": "Let's Play Archive",
"!thesession": "The Session",
"!nck": "Nciku",
"!gifl": "Google: I'm Feeling Lucky",
"!forrst": "Forrst",
"!trueknowledge": "True Knowledge",
"!gutenberg": "Project Gutenberg",
"!susepkg": "openSUSE Software",
"!cbssports": "CBS Sports",
"!rlib": "Rapidlibrary",
"!amf": "Ask Metafilter",
"!ncz": "Nciku Chinese",
"!dfman": "DragonFly BSD manual pages",
"!enpt": "google translate en-pt",
"!sconj": "SpanishDict verb conjugation",
"!kicknews": "KickNews",
"!millionshort": "Million short",
"!dcc": "dict.cc",
"!sfgate": "SFGate",
"!newsweek": "Newsweek",
"!epicurious": "Epicurious",
"!archaur": "ArchLinux User Repository",
"!fsf": "Free Software Foundation",
"!cafepress": "CafePress",
"!bizrate": "Bizrate",
"!gmusic": "Google Music",
"!sskj": "Slovar slovenskega knjižnega jezika",
"!color": "0to255.com",
"!perezhilton": "PerezHilton",
"!starwars": "Wookiepedia",
"!macdic": "Macmillan Dictionary",
"!gtr": "Google TR",
"!exex": "Experts Exchange",
"!gtw": "Google TW",
"!academicearth": "Academic Earth",
"!gtu": "TechGujarat",
"!scan": "Scan",
"!lyricful": "Lyricful",
"!thesaurus": "Thesaurus.com",
"!lar": "Larousse Français",
"!wordnik": "Wordnik",
"!viul": "Vancouver Island University Library",
"!mio": "Movies.io",
"!showtimes": "Google Movies",
"!steam": "Steam",
"!biznar": "Biznar",
"!foolz": "FoOlz [4chan] Archives",
"!dpv": "Dogpile Video",
"!dpw": "Dogpile Web",
"!.net": "DotNet Docs",
"!wissen": "wissen.de",
"!viasona": "Viasona",
"!torrentfreak": "Torrent Freak",
"!deviantart": "DeviantART",
"!monster": "Monster.com",
"!dpd": "Diccionario panhispánico de dudas",
"!rubydoc": "Ruby-Doc.org",
"!dpb": "Depiraatbaai",
"!dpc": "DVDpasCher",
"!dpl": "Dogpile Local",
"!dpn": "Dogpile News",
"!dpi": "Dogpile Images",
"!spice": "Spiceworks Community",
"!nokiamaps": "Nokia Maps",
"!modulusfe": "Modulus Financial Engineering",
"!archforums": "Arch Linux Forums",
"!bugmenot": "BugMeNot",
"!independent": "The Independent",
"!amiami": "Amiami",
"!urbancomfort": "Urban Comfort",
"!ebhk": "ebay.com.hk",
"!php": "PHP.net",
"!ncbi": "NCBI",
"!ninja": "Ninjawords",
"!mdn": "Mozilla Developer Network",
"!icheckmovies": "iCheckMovies",
"!bvideo": "Bing Videos",
"!smog": "Smogon University",
"!tubalr": "Tubalr",
"!tinyurl": "TinyURL.com",
"!hpv": "Heise.de Preisvergleich",
"!memoryalpha": "Memory Alpha",
"!justsnake": "A blog about snakes",
"!msdn": "MSDN",
"!download": "Download.com",
"!chammy": "Chammy.info",
"!metalarch": "Metal Archives",
"!tf2": "TF2 Wiki",
"!earthcam": "earthcam",
"!wtf": "MirBSD Acronyms Database",
"!madeinusa": "madeinusa",
"!oer": "OER Commons",
"!ausgov": "australia.gov.au",
"!wtr": "Vikipedi",
"!dfiles": "Debian file search",
"!allmovie": "AllMovie",
"!equestriadaily": "Equestria Daily",
"!oed": "Oxford English Dictionary",
"!telefonterror": "Telefonterror",
"!fedorawiki": "Fedora Project",
"!dstgb": "StGB - dejure.org",
"!treccani": "treccani",
"!tpb": "The Pirate Bay",
"!sdn": "SAP Developer Network Forums",
"!youku": "Youku",
"!ixquick": "ixquick",
"!dwiki": "Davis Wiki",
"!yandexmaps": "Yandex Maps",
"!malist": "MyAnimeList",
"!mobygames": "Moby Games",
"!gdocs": "Google Docs",
"!cached": "Google Cache",
"!beoes": "Beolingus De-es",
"!sdz": "Site du Zer0",
"!root": "ROOT",
"!shodan": "SHODAN",
"!viki": "Estonian Wikipedia Vikipeedia",
"!redtram": "RedTram",
"!codepoints": "Codepoints",
"!gmonth": "Google Past Month",
"!geartested": "GearTested.Net",
"!unwiki": "Uncyclopedia",
"!stubhub": "StubHub",
"!lucire": "Lucire",
"!technet": "Microsoft TechNet",
"!tfd": "The Free Dictionary",
"!finalfantasy": "Final Fantasy Wiki",
"!market": "Android Market",
"!gisafeoff": "Google Images Safe Off",
"!duedil": "Duedil",
"!faff": "Film Affinity",
"!globo": "Globo.com",
"!themoviedb": "TheMovieDB.org",
"!icm": "iCheckMovies",
"!wowhead": "WowHead",
"!bingmaps": "Bing Maps",
"!chacha": "ChaCha",
"!evirtau": "Estante Virtual by author",
"!evirtat": "Estante Virtual by author and/or title",
"!ede": "ebay.de",
"!trailer": "Trailer Addict",
"!gpl": "Google PL",
"!album": "Allmusic Album Search",
"!gpe": "Google PE",
"!gpd": "GoPkgDoc",
"!yippy": "Yippy",
"!cpan1": "MetaCPAN",
"!morningstar": "Morningstar Search",
"!dilandau": "DilanDau",
"!thomann": "Thomann",
"!rseek": "RSeek",
"!tbuff": "TechnoBuffalo",
"!ixquickimages": "Ixquick images",
"!gweek": "Google Past Week",
"!metacrawler": "MetaCrawler",
"!derstandard": "derStandard.at",
"!markos": "markosweb",
"!cpanm": "MetaCPAN",
"!coalgirls": "Coalgirls",
"!kym": "Know Your Meme",
"!codex": "WordPress Codex",
"!zuckerzauber": "Caros Zuckerzauber Shop",
"!userscripts": "UserScripts",
"!wuk": "Wikipedia UK",
"!reflets": "Reflets.info",
"!tripadvisor": "Trip Advisor",
"!uude": "Ubuntuusers",
"!wten": "Wiktionary",
"!tayda": "Tayda Electronics",
"!kayak": "Kayak",
"!beo": "Beolingus De-en",
"!ytw": "Yaoo! Taiwan",
"!ftube": "FilesTube",
"!cheatcc": "CheatCC.com",
"!tunein": "TuneIn",
"!economist": "The Economist",
"!keepvid": "KeepVid",
"!gimages": "Google Images",
"!meme": "Know Your Meme",
"!ifdb": "Interactive Fiction Database",
"!gbang": "GiggleBang",
"!osmw": "OpenStreetMap Wiki",
"!ubuntuforums": "Ubuntu Forums",
"!usatoday": "USA Today",
"!wtsv": "Wiktionary SV",
"!glv": "Google LV",
"!flipkart": "Flipkart",
"!ehk": "ebay.com.hk",
"!gturl": "Google Translate URL",
"!wpt": "Wikipedia PT",
"!e2": "Everything 2",
"!win": "Microsoft Windows",
"!wpp": "WordPress.org Plugins",
"!roku": "Roku",
"!gatherer": "Gatherer",
"!ceneo": "Ceneo",
"!ka": "Khan Academy",
"!piratesea": "Pirate Sea",
"!musipedia": "Musipedia",
"!puppet": "Puppet Forge",
"!sxc": "stock.xchng",
"!eve": "Eve Online",
"!blendapi": "Blender API",
"!ks": "KickStarter",
"!wmeta": "Wikimedia Meta-Wiki",
"!ad": "Ask Different",
"!ebuilds": "Gentoo Packages",
"!eo": "Online Etymology Dictionary",
"!em": "eBay Motors",
"!cplusplus": "Cplusplus.com",
"!d&d": "d20 SRD",
"!eg": "Epguides",
"!ed": "Encyclopedia Dramatica",
"!ee": "ellislab",
"!eb": "eBay",
"!ec": "http://ecosia.org/",
"!quantcast": "Quantcast",
"!kindle": "Amazon Kindle",
"!rbugs": "Redhat Bugzilla",
"!wp7": "Windows Phone 7 Marketplace",
"!svb": "Silicon Valley Bank",
"!eqbeats": "Equestrian Beats",
"!ep": "Experience Project",
"!startpagevideos": "StartPage videos",
"!ohlohcode": "Ohloh Code",
"!fancy": "TheFancy",
"!mpfl": "MenuPages",
"!800": "800notes",
"!foxnews": "FoxNews",
"!9gag": "9gag",
"!khronos": "Khronos",
"!dota": "DOTA 2 wiki",
"!kazazz": "Kazazz",
"!nutridata": "nutritiondata",
"!tproj": "Torrent Project",
"!monoprice": "Monoprice",
"!eksisozluk": "Ekşi Sözlük",
"!sozluk": "Eksi Sozluk",
"!rpmfind": "RPMfind.net",
"!planetmc": "Planetminecraft",
"!encyclopedia": "Wikipedia",
"!hypestat": "hypestat",
"!dart": "dartlang",
"!gametrailers": "Gametrailers",
"!vs": "Vertical Set",
"!dictleode": "Leo Translation EN<->DE",
"!abandonia": "Abandonia",
"!searchmygmail": "Search My Gmail",
"!omg": "Jaen",
"!bliptv": "Blip.tv",
"!vb": "VedaBase",
"!clojars": "Clojars",
"!clker": "Clker",
"!vg": "Google",
"!vk": "Vkontakte",
"!grical": "GriCal",
"!guitartabs": "GuitarTabsExplorer.com",
"!netcraft": "Netcraft",
"!bmp3": "BeeMP3",
"!mbug": "Mozilla Bugzilla",
"!dafont": "DaFont",
"!ecosia": "Ecosia",
"!expedia": "Expedia",
"!blekkoi": "blekko.com/ws/ ... /images",
"!uitinv": "www.uitinvlaanderen.be",
"!scholarpedia": "Scholarpedia",
"!zipca": "Zip.ca",
"!dreamincode": "Dream In Code",
"!bitgamer": "BitGamer",
"!make": "Make Magazine Blog",
"!flashback": "Flashback",
"!transfermarkt": "transfermarkt.de",
"!businessweek": "Business Week",
"!ppa": "Personal Package Archives for Ubuntu",
"!ppc": "PocketPC.ch",
"!gigaom": "GigaOM",
"!gist": "gists.github.com",
"!npr": "NPR.org",
"!ipartsplanet": "ipartsplanet.com",
"!npm": "Node Package Manager",
"!linuxmint": "Linux Mint Forums",
"!c2": "C2 Wiki",
"!willhaben": "Willhaben",
"!quora": "Quora",
"!bestbuy": "BestBuy",
"!graph": "Graph.tk",
"!digitalcomics": "Digital Comic Museum",
"!tvcom": "tv.com",
"!docjar": "Docjar",
"!dogpile": "Dogpile",
"!gssl": "Google with SSL",
"!march": "Metal Archives",
"!diasporatags": "Diaspora Tags",
"!popsike": "Popsike",
"!zoho": "Zoho",
"!behindthename": "Behind the Name",
"!metrolyrics": "Metro Lyrics",
"!gbr": "Google BR",
"!ebmy": "ebay.com.my",
"!collaj": "collaj",
"!operaext": "Opera Extensions",
"!retailmenot": "RetailgMeNot",
"!gbe": "Google BE",
"!gbg": "Google BG",
"!gbk": "Google Bookmarks",
"!gbm": "Google Bookmarks",
"!archiveis": "Archive.is",
"!coral": "Coral CDN",
"!vol": "Viva o Linux",
"!goodfilms": "Goodfil.ms",
"!appengine": "App Engine Docs",
"!yubnub": "YubNub",
"!prfc": "Pretty-RFC",
"!kpop": "allkpop",
"!thumbplay": "Thumb Play",
"!salon": "Salon.com",
"!dexonline": "Dexonline",
"!groovy": "Groovy API",
"!iconfinder": "IconFinder",
"!dapi": "Drupal API",
"!grelated": "Google",
"!newsvine": "Newsvine",
"!terraria": "Terraria Wiki",
"!uman": "Ubuntu Manpages",
"!wtfr": "Wiktionnaire Wiktionary FR",
"!yui": "YUI Docs",
"!delluk": "Dell UK",
"!totalcmd": "TotalCmd.Net",
"!cstheory": "Theoretical Computer Science - Stack Exchange",
"!thepiratebay": "The Pirate Bay",
"!metacpan": "MetaCPAN",
"!amukmp3": "Amazon.co.uk MP3",
"!hark": "Hark",
"!gumtree": "gmtr",
"!simbad": "SIMBAD Astronomical Database",
"!ygo": "Yu-Gi-Oh! Wikia",
"!jf": "JarFinder",
"!geonames": "GeoNames",
"!jb": "jupiter broadcasting",
"!jl": "John Lewis",
"!irs": "IRS",
"!selfhtml": "SELFHTML",
"!tof": "Tree Of Life Web Project",
"!js": "JavaScript Docs",
"!jq": "jQuery Docs",
"!johnlewis": "John Lewis",
"!irc": "irc.netsplit.de",
"!lmgtfy": "LMGTFY",
"!di": "Dictionary.com",
"!mdc": "Mozilla Developer Center",
"!dj": "Django documentation",
"!dm": "Dailymotion",
"!tweetgrid": "Tweetgrid",
"!epubbud": "ePub Bud",
"!da": "DeviantART",
"!dc": "DuckDuckGo Community Forum",
"!c1024": "Dem Commander1024 sein Blog",
"!c1neon": "C1neon",
"!df": "Daring Fireball",
"!tf2wiki": "Team Fortress Official Wiki",
"!dx": "DealExtreme",
"!sectube": "Security Tube",
"!dp": "Dogpile",
"!espn": "ESPN",
"!librivox": "LibriVox",
"!kaskus": "Kaskus",
"!wnetwork": "The Weather Network",
"!mongo": "MongoDB",
"!debianfr": "Debian pages in french language",
"!fedex": "FedEx",
"!slideshare": "SlideShare",
"!allrovi": "AllRovi",
"!boomkat": "boomkat",
"!modx": "MODX",
"!atx": "Atomix.vg",
"!java": "Oracle Java Documentation",
"!tones": "Thumb Play",
"!walpha": "WolframAlpha",
"!enit": "Google translate en-it",
"!uloz": "uloz.to",
"!appbrain": "AppBrain",
"!poetry": "The Poetry Foundation",
"!thefreedictionary": "The Free Dictionary",
"!redfin": "RedFin",
"!symfony": "Symfony Framework",
"!podnapisi": "Podnapisi",
"!giez": "Geizhals",
"!yii": "Yii Framework",
"!sesli": "Sesli Sözlük",
"!hackernews": "HNsearch",
"!youtify": "Youtify",
"!fbaddons": "Browser Addons: Fireburst",
"!ticketnetwork": "TicketNetwork",
"!washingtonpost": "The Washington Post",
"!desura": "Desura",
"!graphicriver": "GraphicRiver",
"!jishoj": "Jisho JAP to ENG",
"!ebph": "ebay.ph",
"!ebpl": "ebay.pl",
"!bvideos": "Bing Videos",
"!ign": "IGN",
"!jishoe": "Jisho ENG to JAP",
"!cisco": "Cisco",
"!wiktde": "Wiktionary, das freie Wörterbuch",
"!veekun": "Veekun Pokedex",
"!geekcafe": "Le geek café",
"!qomun": "Qomun",
"!ikeade": "Ikea Germany",
"!ymovies": "Yahoo! Movies",
"!unicode": "Unicode Character Search",
"!epicmafia": "Epic Mafia Wiki",
"!ebnl": "ebay.nl",
"!loc": "LOC.gov",
"!python": "Python Docs",
"!subscene": "Subscene",
"!findchips": "FindChips",
"!qrobei": "qrobe.it Images",
"!finn": "Finn",
"!doctrine": "Doctrine ORM",
"!rottentomatoes": "Rotten Tomatoes",
"!netzpolitik": "Netzpolitik.org",
"!t411": "Torrent 411",
"!synonymes": "Synonymes",
"!seamonkey": "Seamonkey Add-on",
"!host": "IP Lookup",
"!ncheap": "NameCheap",
"!webcite": "WebCitation",
"!dbyte": "Darkbyte",
"!fdroid": "F-Droid",
"!elibre": "L'étudiant libre",
"!koders": "Koders",
"!aops": "Art of Problem Solving Wiki",
"!play.com": "Play.com",
"!serverfault": "ServerFault",
"!emacswiki": "EmacsWiki",
"!cracked": "Cracked",
"!freebsdman": "FreeBSD Man Pages",
"!gcz": "Google CZ",
"!d20srd": "d20 SRD",
"!britannica": "Britannica.com",
"!bol": "Bol.com",
"!tpbs": "The Pirate Bay sorted by seeds",
"!gca": "Google CA",
"!gcn": "Google CN",
"!gco": "Google CO",
"!greader": "Google Reader",
"!adslabs": "NASA ADS Labs",
"!ghacks": "Ghacks Technology News",
"!whitepages": "InfoSpace",
"!christmas": "Find Christmas Presents",
"!postgresql": "PostgreSQL.org",
"!photobucket": "PhotoBucket.com",
"!ordsv": "Ord.se SV",
"!freitag": "Der Freitag",
"!jbhifi": "JB-HI-FI",
"!filmweb": "Filmweb",
"!ees": "ebay ES",
"!imslp": "IMSLP",
"!apple": "Apple",
"!itunes": "iTunes Apps",
"!books": "Amazon.com",
"!ubuntuf": "Ubuntu Forums",
"!ebaymy": "ebay.com.my",
"!intaljazeera": "Aljazeera",
"!gigablast": "GIGABLAST",
"!blogspot": "Blogspot",
"!bchain": "blockchain.info",
"!valleywag": "Valleywag",
"!whatif": "what if?",
"!digitalcomicmuseum": "Digital Comic Museum",
"!booko": "Booko",
"!8tracks": "8tracks",
"!naruto": "NarutoPedia",
"!shodanhq": "SHODAN",
"!gookokugo": "Goo国語辞典検索",
"!crawl": "CrawlWiki",
"!processing": "Processing.org",
"!guardian": "The Guardian",
"!nrc": "NRC",
"!asknews": "Ask News",
"!ebookee": "Ebookee",
"!leoe": "LEO Dictionary English",
"!habbotrading": "Habbo Ruilwaarde",
"!python30": "Python3.0 Docs",
"!python31": "Python3.1 Docs",
"!python32": "Python3.2 Docs",
"!php2py": "Php2Python",
"!webstagram": "Webstagram",
"!diaspora": "Diaspora",
"!ping": "DomainTools.com",
"!gph": "Google PH",
"!parashift": "Parashift.com",
"!noaa": "NOAA",
"!fports": "FreeBSD Ports",
"!discussion": "Google Discussion Search",
"!artwork": "artcyclopedia.com",
"!webmenu": "webmenu",
"!youdao": "Youdao",
"!eksi": "ek$i sozluk",
"!otran": "Open-Tran.eu",
"!entrez": "Entrez",
"!doaj": "Directory of Open Access Journals",
"!scribd": "Scribd",
"!verge": "The Verge",
"!networkx": "networkx",
"!songmeaning": "Songmeaning",
"!markmail": "MarkMail",
"!gamejaunt": "Game Jaunt",
"!wp7fr": "Windows Phone 7 Marketplace France",
"!etym": "etymonline.com",
"!py3k": "Python 3 docs",
"!iec": "Institut d'Estudis Catalans",
"!rswiki": "Runescape Wiki",
"!archive": "Internet ",
"!gve": "Google VE",
"!rlslog": "Releaselog",
"!gviewer": "Google Docs Viewer",
"!11870": "11870",
"!gvn": "Google VN",
"!geizhals": "Geizhals",
"!gview": "Google Docs Viewer",
"!playlist": "Playlist.com",
"!ebaynl": "ebay.nl",
"!btcaddr": "blockchain.info",
"!adsabs": "SAO/NASA Astrophysics Data System",
"!cartoonnetwork": "Cartoon Network",
"!anime": "aniDB",
"!knowyourmeme": "Know Your Meme",
"!gizoogle": "Gizoogle",
"!csc": "Chicken Scheme Chickadee",
"!channel4": "Channel 4",
"!channel5": "Channel 5",
"!sheetmusicplus": "Sheet Music Plus",
"!gyear": "Google Past Year",
"!tldp": "The Linux Documentation Project",
"!half": "Half.com",
"!daum": "Daum",
"!arch": "ArchLinux Wiki",
"!splunk": "Splunk",
"!..": "..duo dotdotduo",
"!github": "GitHub.com",
"!lenovo": "Lenovo",
"!linkup": "LinkUp",
"!icons": "IconFinder",
"!tyda": "Tyda.se",
"!fc": "FreeCode",
"!fb": "Facebook",
"!torrent": "ISOHunt",
"!theh": "the h",
"!ff": "fanfiction.net",
"!dkwk": "DokuWiki",
"!fd": "Free Dictionary",
"!sourceforge": "SourceForge.net",
"!bitbucket": "BitBucket.org",
"!fotolog": "Fotolog",
"!rae": "Diccionario de la lengua española",
"!tcl": "TCLers Wiki",
"!ft": "Financial Times",
"!bookdepository": "The Book Depository",
"!fy": "Fuck Yeah",
"!digg": "Digg",
"!fsfd": "Free Software Directory",
"!tigsource": "TIGSource",
"!bpedia": "Bulbapedia",
"!audiojungle": "AudioJungle",
"!pricegrabber": "Price Grabber",
"!openstreet": "OpenStreetMap",
"!flex": "Flex Docs",
"!addic7ed": "Addic7ed",
"!down": "Down For Everyone Or Just Me",
"!myfonts": "Myfonts",
"!dotlan": "Dotlan EVE Maps",
"!nnd": "Norwegian Nynorsk Dictionary",
"!heureka": "Heureka!",
"!vimdoc": "Vimdoc",
"!pinvoke": "Pinvoke.net",
"!ehow": "eHow",
"!matplotlib": "matplotlib",
"!freedictionary": "Free Dictionary",
"!nginxwiki": "nginx wiki",
"!wes": "Wikipedia ES",
"!weu": "Wikipedia",
"!ites": "Google translate it-es",
"!headfi": "Head-Fi",
"!cve": "Common Vulnerabilities and Exposures CVE",
"!web": "Web Platform",
"!fimfic": "FimFiction",
"!iteo": "Vocabolario Italiano-Esperanto Minnaja",
"!iten": "Google translate it-en",
"!gfl": "Google",
"!2dehands": "2dehands.be",
"!myanimelist": "MyAnimeList",
"!wen": "Wikipedia EN",
"!weo": "Wikipedia EO",
"!cdt": "China Digital Times",
"!searchch": "search.ch",
"!ahk": "AutoHotkey",
"!dottk": "Dot TK",
"!imdb": "IMDB",
"!blindsearch": "Blind Search",
"!toma": "Toma.hk",
"!cdc": "CDC",
"!ahw": "Arkham Horror Wiki",
"!ebaybe": "ebay.be",
"!cdn": "Cdn js",
"!tlfi": "Trésor de la Langue Française informatisé",
"!pw": "Pricewatch",
"!wolfram": "WolframAlpha",
"!ps": "Picsearch",
"!blocket": "Blocket",
"!stagevu": "StageVu",
"!py": "Python Docs",
"!bsdman": "FreeBSD",
"!nametoolkit": "NameToolkit",
"!srfi": "Scheme Request for Implementation",
"!ifixit": "iFixit",
"!domain": "NameCheap",
"!archbugs": "ArchLinux Bugtracker",
"!pb": "Pinboard",
"!4sq": "Jaen",
"!jquery": "jQuery Docs",
"!tickets": "StubHub",
"!firefoxmarket": "Firefox Marketplace",
"!bcwiki": "Bitcoin Wiki",
"!numpy": "numpy",
"!srcforge": "SourceForge",
"!tvguide": "TV Guide",
"!friendster": "Friendster",
"!thenation": "The Nation",
"!eggtimer": "Egg Timer",
"!911": "911tabs",
"!dnb": "Deutsche National Bibliothek",
"!tasteline": "Tasteline",
"!librarything": "LibraryThing",
"!forvo": "Forvo",
"!querycat": "QueryCat",
"!findjar": "FindJar.com",
"!dns": "DNS Lookup",
"!pcpartpicker": "PCPartPicker",
"!playterm": "PlayTerm",
"!gw2": "Guild Wars 2 Wiki",
"!gnews": "Google News",
"!mtv": "MTV",
"!arcgisres": "ArcGIS Resource Center",
"!askubuntu": "Ask Ubuntu",
"!dw": "Deeper Web",
"!springsource": "SpringSource",
"!aliexp": "Aliexpress",
"!cnbc": "CNBC",
"!stupi": "Stupidedia",
"!gae": "Google AE",
"!bingimages": "Bing Images",
"!pokepedia": "Pokepedia",
"!macys": "Macy's",
"!gat": "Google AT",
"!gau": "Google AU",
"!jstor": "JSTOR",
"!gar": "Google AR",
"!gas": "Google American Samoa",
"!2ememain": "2ememain.be",
"!superd": "SuperDownloads",
"!gwp": "Google Wikipedia",
"!wlfind": "cablegatesearch",
"!ymaps": "Yahoo Maps",
"!unity": "Unity Answers",
"!beopt": "Beolingus De-pt",
"!xkcd": "xkcd",
"!libuniversitaria": "Libreria Universitaria",
"!gcl": "Google CL",
"!allexperts": "All Experts",
"!adplanner": "Google Ad Planner",
"!python27": "Python2.7 Docs",
"!python26": "Python2.6 Docs",
"!pricespy": "pricespy.co.nz",
"!zdnet": "ZDNet",
"!sweetsearch": "SweetSearch4me",
"!evernote": "Evernote",
"!appnr": "appnr",
"!wresfr": "Wordreference",
"!drinkify": "Drinkify",
"!marktplaats": "Marktplaats",
"!gdgt": "gdgt",
"!distro": "DistroWatch",
"!okazii": "Okazii",
"!duck.co": "duck.co",
"!wired": "Wired",
"!couponmeup": "CouponMeUp",
"!zhen": "Nciku",
"!spezify": "Spezify",
"!zillow": "Zillow.com",
"!kotobank": "コトバンク用語検索",
"!wtde": "Wiktionary Deutsch",
"!tastekid": "TasteKid",
"!gopkg": "GoPkgDoc",
"!gawker": "Gawkr",
"!googlemap": "Google Maps",
"!hulu": "Hulu",
"!wikisimple": "Wikipedia Simple",
"!oeis": "The On-Line Encyclopedia of Integer Sequences",
"!veronica": "Veronica2",
"!blekko": "Blekko",
"!concerts": "Pollstar",
"!filecrop": "filecrop",
"!mtg": "magiccards.info",
"!sqlalchemy": "SQL Alchemy Docs",
"!cxx": "Cplusplus.com",
"!itv": "iTV Player",
"!tmt": "Tiny Mix Tapes",
"!tmz": "TMZ",
"!pcgarage": "pcgarage",
"!gcal": "Google Calendar",
"!halopedia": "Halopedia",
"!readwriteweb": "ReadWriteWeb",
"!barnesandnoble": "Barnes & Noble",
"!alexasi": "alexa",
"!slackbuild": "Slackbuilds",
"!adlibrisfi": "AdLibris FI",
"!fcatch": "FileCatch.com",
"!scopek": "scopek",
"!uniprot": "uniprot",
"!superuser": "SuperUser",
"!dbsnp": "NCBI dbSNP",
"!thinktutorial": "Th!ink",
"!evec": "EVE-Central",
"!pricewatch": "Tweakers Pricewatch",
"!w3": "W3C",
"!wfr": "Wikipedia FR",
"!mouser": "Mouser Electronics",
"!tvrage": "TVRage",
"!evirtdes": "Estante Virtual description",
"!wfa": "Persian Wikipedia",
"!jms": "JoobMobile Support",
"!webmd": "WebMD",
"!wfi": "Finnish Wikipedia",
"!isitdown": "isup.me",
"!ieee": "IEEE Xplore Digital Library",
"!entireweb": "Entireweb",
"!ada": "Ada 2005 Manual",
"!collegeboard": "CollegeBoard",
"!drugbank": "drugbank.ca",
"!bluray": "blu-ray.com",
"!ghu": "Google HU",
"!gwpde": "Google de.Wikipedia",
"!isbnnu": "isbn.nu",
"!wikibooks": "Wikibooks",
"!atomurl": "Atomurl",
"!wp": "WordPress.org",
"!wq": "WikiQuote",
"!wr": "WordReference",
"!images": "Google Images",
"!wt": "Wiktionary",
"!wu": "Weather Underground",
"!devo": "Dev.Opera ",
"!menupages": "MenuPages",
"!psql": "PostgreSQL Docs",
"!rapgenius": "Rap Genius",
"!wikihow": "WikiHow",
"!boingboing": "boingboing",
"!cppr": "Cppreference",
"!searchcode": "searchco.de",
"!wc": "The Weather Channel",
"!wd": "Website Down",
"!wf": "fr.wikipedia.org",
"!wg": "WunderGround",
"!wm": "Wikipedia mobile",
"!pitchfork": "Pitchfork Media",
"!alternative": "AlternativeTo.net",
"!alexa": "Alexa",
"!cyberport": "Cyberport",
"!superdownloads": "SuperDownloads",
"!craigslist": "Craigslist",
"!tcgplayer": "TCGPlayer",
"!tigdb": "The Indie Game Database",
"!filext": "FILExt",
"!lwjgl": "LWJGL",
"!videohive": "VideoHive",
"!dig": "dig Lookup",
"!die": "Linux.die.net man pages",
"!dic": "Dream In Code",
"!magmawiki": "MagmaWiki",
"!groklaw": "Groklaw",
"!wiktbr": "Wikeriadur",
"!editus": "editus.lu",
"!adsref": "NASA ADS Bibliographical reference search",
"!y": "Yahoo!",
"!pkgsrc": "Pkgsrc.se",
"!myetym": "www.myetymology.com",
"!rmp": "Rate My Professor",
"!emedicine": "Medscape Reference",
"!gsbr": "Google Shopping BR",
"!geiz": "Geizhals",
"!jcpenney": "JC Penney",
"!lma": "Live Music Archive",
"!nce": "Nciku English",
"!heroku": "Heroku Devcenter",
"!gitl": "Google IT",
"!bmaps": "Bing Maps",
"!slbr": "SpeakLikeABrazilian",
"!rml": "R mailing lists archive",
"!bandcamp": "Bandcamp",
"!zerohedge": "ZeroHedge",
"!gmaps": "Google Maps",
"!springer": "Springer Link",
"!sc2ranks": "SC2Ranks",
"!g4tv": "G4TV",
"!random": "random.org",
"!like": "Thesaurus.com",
"!emule": "eMule Content Database",
"!tr": "Google Translate",
"!punchfork": "Punchfork",
"!niv": "BibleStudyTools.com",
"!thetvdb": "The TV db",
"!yaen": "Yandex",
"!essefn": "ESSEF nl",
"!nih": "NIH",
"!metal": "Encyclopedia Metallum The Metal Archives",
"!glink": "Google",
"!shbd": "Shabdkosh",
"!wikide": "Wikipedia",
"!fgf": "Flightgear Forums",
"!lisp": "Lispdoc",
"!downfor": "DownForEveryone?",
"!asn": "AS Lookup",
"!ask": "Ask.com",
"!ujc": "Internetová jazyková příručka",
"!archpkg": "ArchLinux Packages",
"!hes": "HES Data Dictionary",
"!tinypic": "TinyPic",
"!evirt": "Estante Virtual all",
"!wwend": "Worlds Without End",
"!artist": "artcyclopedia.com",
"!amo": "Firefox Addons",
"!aes": "Amazon ES",
"!adobe": "Adobe",
"!archwiki": "ArchLinux Wiki",
"!subito": "Subito.it",
"!furet": "Furet du Nord",
"!jalopnik": "Jalopnik",
"!wordreference": "WordReference",
"!ohloh": "ohloh",
"!filestube": "FilesTube",
"!myspace": "MySpace",
"!kickasstorrents": "Kick Ass Torrents",
"!build": "Build.com",
"!jeux": "JeuxVideo",
"!gtfr": "Google Translate to French",
"!moddb": "ModDB",
"!datpiff": "Datpiff",
"!mcskin": "Minecraft Skin Search",
"!as3": "AS3 Docs",
"!spanishdict": "SpanishDict",
"!softpedia": "Softpedia",
"!yahoo": "Yahoo!",
"!drupal": "Drupal",
"!safebooru": "Safebooru",
"!mercadolibre": "Mercado Libre",
"!rollingstone": "Rolling Stone",
"!viewpdf": "Online PDF Viewer",
"!wallpaper": "Google Images",
"!grooveshark": "Grooveshark",
"!netgear": "Netgear",
"!bsd": "GoogleBSD",
"!wiktfr": "Wiktionnaire",
"!webwarper": "WebWarper.com",
"!ikeafr": "Ikea FR",
"!sumotorrent": "sumotorrent",
"!railsdock": "Rails-Dock",
"!dtdeals": "DoubleTakeDeals",
"!omgu": "OMG! Ubuntu!",
"!btabs": "UltimateGuitar.com",
"!diccionari": "Diccionari de la llengua Catalana",
"!usaspending": "usaspending.gov",
"!evewiki": "EVElopedia",
"!puc": "Ubuntu Packages Search",
"!ral": "RAL color coding system",
"!hymnary": "Hymnary",
"!mimvi": "Mimvi",
"!lmddgtfy": "LMDDGTFY.NET",
"!versandapo": "VersandApo",
"!kprojects": "KDE Git Projects",
"!howtogeek": "How-To Geek",
"!metalab": "Metalab Mediawiki",
"!userstyles": "Userstyles",
"!opera": "Opera Extension",
"!file": "FileHippo",
"!gems": "RubyGems",
"!etsy": "Etsy",
"!ext": "FILExt",
"!applediscuss": "Apple Discussions",
"!ios": "iOS Developer Library",
"!linuxhcl": "Linux Hardware Compatibility List",
"!esit": "Google translate es-it",
"!lasership": "LaserShip",
"!abcnotation": "abcnotation.com",
"!iol": "IOL",
"!evirted": "Estante Virtual by publisher",
"!cultureunplugged": "CultureUnplugged.com",
"!snl": "http://snl.no/",
"!ctan": "CTAN.org",
"!sporcle": "Sporcle",
"!kol": "The KoLwiki",
"!amazonfr": "Amazon france",
"!pbs": "PBS",
"!xiami": "Xiami",
"!port": "port Lookup",
"!ebayhk": "ebay.com.hk",
"!scirus": "Scirus",
"!pbi": "Pbi Directory",
"!hackage": "HackageDB",
"!comicrocket": "Comic Rocket",
"!kos": "KingOfSat",
"!opensubtitles": "OpenSubtitles.org",
"!verticalset": "Vertical Set",
"!dribble": "dribbble",
"!oracle": "Oracle.com",
"!metafilter": "MetaFilter",
"!ordnet.dk": "Ordnet",
"!bookfinder": "Book Finder",
"!register": "The Register",
"!firmycz": "firmy.cz",
"!yandexm": "Yandex Market",
"!pic": "picsearch",
"!reuters": "Reuters",
"!cpan": "MetaCPAN",
"!tvlinks": "TVLinks",
"!hitchwiki": "Hitchwiki",
"!wtit": "Wiktionary",
"!dash": "Digital Access to Scholarship at Harvard",
"!wpthemes": "WordPress.org Themes",
"!jarvanaproject": "Jarvana",
"!cpap": "CPAP.com",
"!habr": "HabraHabr",
"!local": "Google Local",
"!ames": "Amazon España",
"!uschess": "US Chess Federation",
"!mathworld": "Wolfram MathWorld",
"!gcache": "Google Cache",
"!isbndb": "ISBNdb.com",
"!brreg": "Brønnøysundregistrene",
"!academic": "Microsoft Academic",
"!bookflavor": "Bookflavor",
"!citeul": "CikeULike",
"!aion": "Aion Wiki",
"!ox": "Oxford Dictionaries",
"!oz": "Stichting OZNB Foundation",
"!nndb": "NNDB",
"!nolo": "Nolo.com",
"!gcode": "Google Code",
"!reed": "reed.co.uk",
"!genesis": "Library Genesis",
"!yr": "yr",
"!identica": "Identi.ca",
"!lynx": "lynx.io",
"!progarchives": "Prog Archives",
"!qrz": "QRZ.com",
"!shoutitout": "Shoutitout",
"!cine": "Cinemassacre",
"!yc": "HNsearch",
"!mkz": "musik",
"!wbr": "Wikipedia Brezhoneg",
"!yf": "Yahoo Finance",
"!asksutra": "Ask Sutra",
"!codecanyon": "CodeCanyon",
"!osi": "Open Source Initiative",
"!arduino": "Arduino",
"!ose": "Open Site Explorer",
"!track": "PackageMapping",
"!mpbo": "MenuPages",
"!pond5": "Pond5",
"!chilango": "Chilango",
"!nina": "Narodowy Instytut Audiowizualny nina.gov.pl",
"!similarsitesearch": "similarsitesearch",
"!gshopping": "Google Shopping",
"!ohinternet": "ohinternet.com",
"!smashwords": "Smashwords",
"!fullwiki": "The Full Wiki",
"!tvdb": "TheTVDB.com",
"!apidockruby": "APIdock",
"!abe": "Abe's Market",
"!video": "YouTube",
"!dfwiki": "DwarfFortress Wiki",
"!google": "Google",
"!linkscape": "Open Site Explorer",
"!thwiki": "Touhouwiki English",
"!digikey": "Digi-Key",
"!gfinance": "Google Finance",
"!smallsafari": "SmallSafari",
"!ultimateguitar": "Ultimate Guitar",
"!dirae": "Dirae",
"!techiris": "The TechIRIS",
"!clusty": "Clusty",
"!latimes": "LATimes",
"!dhlde": "DHL German",
"!besch": "Bescherelle",
"!gamerankings": "GameRankings.com",
"!staples": "Staples.com",
"!fnac": "Fnac",
"!malpha": "Memory Alpha",
"!australian": "The Australian",
"!paleo": "Paleo Hacks",
"!danbooru": "Danbooru",
"!ebsg": "ebay.com.sg",
"!packagist": "Packagist",
"!software": "Download.com",
"!jigsaw": "Jigsaw",
"!msnbc": "MSNBC",
"!squidoo": "Squidoo",
"!adlibrisse": "AdLibris SE",
"!ddo": "Den Danske Ordbog",
"!phpnet": "PHP.net",
"!nzb": "NzbMatrix",
"!ddg": "DuckDuckGo",
"!scripts": "HotScripts",
"!ebayfr": "ebay.fr",
"!howtoforge": "HowtoForge",
"!webischool.org": "webischool.org",
"!cpuworld": "cpu-world.com",
"!theonion": "The Onion",
"!ebay": "eBay",
"!kat": "Kick Ass Torrents",
"!seeks": "Seeks",
"!quran": "The Noble Qur'an",
"!yv": "Yahoo",
"!drive": "Google Drive",
"!ptp": "Pass The Popcorn",
"!ebat": "ebay.at",
"!infochimps": "Infochimps",
"!jalop": "Jalopnik",
"!gth": "Google TH",
"!buej": "BU Alumni Medical Library- E-Journals",
"!appleinsider": "AppleInsider",
"!woxikon": "Woxikon",
"!node": "node.js docs",
"!weatherbug": "WeatherBug",
"!zahe": "Zahe.me",
"!macaddress": "MAC Address",
"!rdio": "Rdio",
"!nettuts": "Nettuts+",
"!tawlk": "Tawlk",
"!vala": "http://live.gnome.org/Vala",
"!csmonitor": "CSMonitor",
"!hphys": "HyperPhysics",
"!searchcpan": "CPAN",
"!recipes": "Punchfork",
"!dtag": "DebTags",
"!mspawiki": "MS Paint Adventures Wiki",
"!enl": "ebay.nl",
"!dpr": "dpreview",
"!gnz": "Google New Zealand",
"!gnu": "GNU",
"!freedict": "Free Dictionary",
"!opencv": "OpenCV documentation",
"!l1sp": "Lisp Docs",
"!gnm": "Google News mobile",
"!gnl": "Google NL",
"!gno": "Google NO",
"!osm": "OpenStreetMap",
"!translate": "Google Translate",
"!developpez": "Developpez.com",
"!orielly": "O'Reilly",
"!wresen": "WordReference",
"!ens": "Ensembl",
"!trisquel": "Trisquel",
"!dsd": "Den Store Danske",
"!eff": "Electronic Frontier Foundation",
"!yt": "YouTube",
"!isgd": "is.gd",
"!digitalspy": "DigitalSpy",
"!gamecheats": "CheatCC",
"!allmusic": "AllMusic",
"!name": "Name.com ",
"!unix": "The Unix Tree",
"!savannah": "Savannah",
"!use": "Unix & Linux StackExchange",
"!dmoz": "dmoz",
"!allposters": "All Posters",
"!perlmod": "p3rl.org",
"!sigma": "Sigma Aldritch Catalog",
"!greeksubtitles": "Greek Subtitles Project",
"!h33t": "h33t",
"!fool": "Fool.com",
"!sapo": "SAPO",
"!fonplus": "Find People on Plus",
"!zemljevid": "Zemljevid Najdi.si",
"!amde": "Amazon.de",
"!dplb": "DPLB",
"!watch": "YouTube",
"!synonyms": "Thesaurus.com",
"!drupalapi": "Drupal API",
"!french": "French Language and Usage",
"!stabs": "Songsterr",
"!jamendo": "Jamendo",
"!costco": "Costco.com",
"!nf": "netflix",
"!ne": "Newegg",
"!nx": "networkx",
"!kelkoo": "Kelkoo UK",
"!stumbleupon": "StumbleUpon",
"!gmg": "Green Man Gaming",
"!godaddy": "GoDaddy",
"!efr": "ebay.fr",
"!wca": "Wikipedia CA",
"!cultofmac": "Cult of Mac",
"!kong": "Kongregate",
"!mywot": "Web of Trust",
"!wcs": "Wikipedia Cs",
"!mafia": "mafiatoday",
"!safeon": "DDG Safesearch On",
"!head-fi": "Head-Fi",
"!mpch": "MenuPages",
"!guildwiki": "Guild Wiki",
"!osvdb": "The Open Source Vulnerability Database",
"!redz": "RedZ",
"!wwwjdic": "WWWJDIC",
"!fotobanka": "FotoBanka",
"!corriere": "Corriere della Sera",
"!furaffinity": "FurAffinity",
"!readthedocs": "Read the Docs",
"!ponsde": "PONS",
"!mmls": "Minuteman Library Network",
"!enes": "Google translate en-es",
"!ubuntuusers": "Ubuntuusers",
"!enel": "Google translate en-el",
"!eneo": "Sonja's English-Esperanto Dictionary",
"!biblegateway": "BibleGateway",
"!mysql": "MySQL.com",
"!perl": "p3rl.org",
"!nhl": "NHL.com",
"!wrfe": "Wordreference fr->en",
"!ukp": "UK Parliament",
"!shelfari": "Shelfari",
"!mapquest": "Mapquest",
"!ffm": "Firefox Marketplace",
"!touhou": "Touhou Wiki",
"!elexikon": "Elektronik-Lexikon",
"!gmail": "GMail.com",
"!mill": "Million short",
"!jpg": "http://jpg.to/",
"!amcn": "Amazon.cn",
"!wru": "Wikipedia RU",
"!chow": "Chowhound",
"!amca": "Amazon.ca",
"!mozbrowser": "MozBrowser.nl",
"!wayback": "Wayback Machine",
"!imdbp": "IMDB pro",
"!jar": "FindJar.com",
"!folktunefinder": "FolkTuneFinder",
"!robtex": "Robtex",
"!esen": "Google translate es-en",
"!slickdeals": "SlickDeals",
"!movieweb": "Movie Web",
"!batoto": "Batoto",
"!miniinthebox": "MiniInTeBox",
"!feedbooks": "Feedbooks",
"!tnw": "The Next Web",
"!tnt": "TNT",
"!oca": "Open Clip Art Library",
"!pspgen": "PSP GEN",
"!is": "http://ilmainensanakirja.fi/",
"!pten": "Google Translate pt-en",
"!iso": "DistroWatch",
"!iw": "InstantWatcher",
"!ocw": "MIT OpenCourseWare",
"!ix": "Ixquick Alias",
"!fowiki": "The Vault/Nukapedia/Fallout Wiki",
"!aljazeera": "Aljazeera English",
"!bangs": "DuckDuckGo",
"!charitynavigator": "Charity Navigator",
"!qtc": "Nokia QT Docs",
"!simplyhired": "SimplyHired",
"!multicolr": "Multicolr Search Lab",
"!gse": "Google SE",
"!ebuild": "Gentoo Packages",
"!dispt": "Dispostable",
"!poems": "The Poetry Foundation",
"!eow": "英辞郎 on the WEB Pro",
"!amazon": "Amazon.com",
"!mpdc": "MenuPages",
"!uitm": "Uitmuntend",
"!gsa": "GSA Advantage",
"!localch": "local.ch",
"!thefutoncritic": "The Futon Critic",
"!cran": "CRAN",
"!flattr": "Flattr",
"!goear": "Goear",
"!awimg": "All the web, yahoo version",
"!pythondev": "Python dev doc",
"!delicious": "delicious",
"!wikinews": "Wikinews",
"!albumartcd": "Albumart Music",
"!ya": "Yandex",
"!tivo": "TiVo",
"!selfhtmlwiki": "SELFHTML wiki",
"!othes": "http://openthes-es.berlios.de/",
"!sos": "Software OpenSuse",
"!docubufr": "Documentation Ubuntu-fr",
"!ciu": "Can I use...",
"!auk": "Amazon UK",
"!slatefr": "Slate.fr",
"!brew": "Homebrew",
"!aur": "Arch Linux",
"!cia": "CIA World Factbook",
"!snuson": "SnusOn",
"!gblogs": "Google Blogs",
"!hn": "HNsearch",
"!ikea": "Ikea",
"!edgar": "EDGAR",
"!ck12": "cK-12.org",
"!linuxfr": "LinuxFr.org",
"!mightyape": "Mighty Ape",
"!vimeo": "Vimeo",
"!fox": "Fox News",
"!altto": "AlternativeTo",
"!gtde": "Google Translate to German",
"!octopart": "Octopart",
"!creativecowlib": "Creative Cow Library",
"!blueletterbible": "Blue Letter Bible",
"!fbrecipes": "Recipes: Fireburst",
"!uu": "Ubuntuusers",
"!commons": "Wikimedia Commons",
"!rubygems": "RubyGems",
"!bitesizegcse": "BBC GCSE Bitesize",
"!x86": "x86 Opcode & Instruction Reference",
"!ug": "Ultimate Guitar",
"!ud": "Urban Dictionary",
"!django": "Django Docs",
"!maany": "Almaany",
"!xmarks": "Xmarks | Boomark Sync and Search",
"!freshports": "FreshPorts",
"!lnu": "Library.nu",
"!ftram": "File Tram",
"!zvon": "ZVON.org",
"!playasia": "Play-asia",
"!iops": "Iops",
"!pminister": "Priceminister",
"!rbl": "RBL Lookup",
"!gscholar": "Google Scholar",
"!acronym": "Acronyms",
"!piratebay": "The Pirate Bay",
"!som": "Spirit of Metal",
"!ktechbase": "KDE TechBase",
"!lonelyplanet": "lonelyplanet",
"!braumeister": "Braumeister",
"!vossey": "Vossey.com",
"!arxiv": "arXiv",
"!metacritic": "Metacritic",
"!lyriki": "Lyriki",
"!blinders": "DuckDuckGo Blinders",
"!tineye": "Tineye",
"!rdoc": "RubyDoc.info",
"!yelp": "Yelp",
"!jeuxvideo.com": "jeuxvideo.com",
"!./": "Slashdot",
"!djangome": "Django Docs",
"!etymology": "EtymOnline.com",
"!pipl": "Pipl",
"!parlysearch": "ParlySearch",
"!baixaki": "Baixaki",
"!instructables ": "Instructables",
"!glt": "Google LT",
"!ruby": "Ruby-lang.org",
"!techjungle": "Tech Jungle",
"!ebayit": "ebay.it",
"!bnews": "Bing News",
"!bnf": "Bibliothèque nationale de France ",
"!gulesider": "Gule Sider",
"!newyorker": "The New Yorker",
"!wikit": "Wikitravel",
"!jisho": "Jisho",
"!portableapps": "PortableApps.com",
"!smogon": "Smogon University",
"!punguzo": "Punguzo.com",
"!apertium": "Appertium",
"!twitter": "Twitter",
"!omq": "Open.MapQuest.com",
"!wikic": "Wikimedia Commons",
"!wikia": "Wikia",
"!netflix": "Netflix current one is broken",
"!pinterest": "pinterest.com/",
"!recycle": "Earth911.com",
"!repubblica": "la Repubblica",
"!gri": "Google Reverse Image Search",
"!courant": "Hartford Courant",
"!cooksi": "Cook's Illustrated",
"!gro": "Google RO",
"!posix": "The Open Group",
"!minecraft": "Minecraft Wiki",
"!gru": "Google RU",
"!grv": "Grooveshark",
"!flickr": "Flickr",
"!xing": "Xing.de",
"!fssc": "Code: Fireburst Specialized Search",
"!safeoff": "Safe search off",
"!usps": "USPS",
"!stumble": "StumbleUpon",
"!pyramid": "Pyramid Web Application Development Framework",
"!stuffnz": "stuff.co.nz",
"!amuk": "Amazon.co.uk",
"!alternativeto": "AlternativeTo",
"!pandora": "Pandora",
"!hitta": "Hitta",
"!cspan": "C-SPAN",
"!gratefuldead": "Grateful Dead Concerts",
"!qur": "The Noble Qur'an",
"!seslisozluk": "sesli sözlük",
"!vagalume": "Vagalume",
"!ricardo": "www.ricardo.ch",
"!mapsfr": "Google Maps FR",
"!swipl": "SWI-Prolog",
"!basesearch": "BASE Bielefeld Academic Search Engine",
"!eu3": "Europa Universalis III Wiki",
"!zf": "http://framework.zend.com",
"!rom": "Emuparadise",
"!kolw": "Kingdom of Loathing Wiki",
"!cheatcodes": "CheatCC",
"!ovi": "Ovi Store",
"!mbsdman": "MirBSD Manpages",
"!ord": "Ord.se",
"!flix": "http://www.flixster.com/",
"!roto": "Rotoworld",
"!hayoo": "Hayoo",
"!kuler": "Adobe Kuler",
"!jaba": "JABA and JEAB articles",
"!image": "Google Images",
"!niif": "NIIF Institute",
"!microsoft": "Microsoft",
"!geegain": "Geegain Search",
"!tpp": "ThePirateParty TPB Mirror",
"!dict": "DICT.org",
"!tknowledge": "True Knowledge",
"!jobs": "Indeed",
"!gza": "Google ZA",
"!allrecipes": "All Recipes",
"!rogerebert": "Roger Ebert",
"!redflagdeals": "RedFlagDeals.com",
"!dpackages": "Debian Packages",
"!greplin": "Greplin",
"!gephi": "Gephi",
"!debml": "Debian Mailing Lists",
"!dealextreme": "DealExtreme",
"!tgdict": "tangorin",
"!dpkg": "Debian Packages",
"!archived": "Wayback Machine",
"!ark": "Intel Ark",
"!occuprint": "Occuprint",
"!ars": "Ars Technica",
"!gban": "GameBanana",
"!emag": "eMAG",
"!locita": "Locita",
"!homebase": "Homebase",
"!adn": "app.net",
"!sitealytics": "sitealytics.com",
"!darklyrics": "DarkLyrics.com",
"!grepcpan": "CPAN -> GREP",
"!mashable": "Mashable",
"!adc": "Apple Developer Center",
"!torrentz": "Torrentz",
"!posten": "Posten",
"!osxdaily": "Osxdaily",
"!vgmdb": "VGMdb",
"!ads": "SAO/NASA ADS",
"!shop": "Yahoo! Shopping",
"!kobo": "Kobo",
"!dlss": "Summon Duke",
"!tz": "timeanddate",
"!peppermintos": "Peppermint OS forums",
"!slyrics": "SONG LYRICS",
"!patft": "US Patent Database Search",
"!tt": "Tokyo Toshokan",
"!tw": "Twitter",
"!tv": "Yahoo! TV",
"!javascript": "JavaScript Docs",
"!tk": "True Knowledge",
"!slackfind": "Slackfind",
"!vatera": "Vatera",
"!imeem": "Imeem",
"!tc": "Techcrunch",
"!buscape": "Buscapé",
"!tg": "ThinkGeek",
"!wikipainting": "wikipaintings",
"!mydealz": "myDealZ",
"!tmark": "Justia.com trademarks",
"!watchanime": "Watch Anime On",
"!vbox7": "Vbox7",
"!tanka": "Tankafetast",
"!pyside": "PySide",
"!maxthon": "Maxthon 3 Extension",
"!ufrj": "Universidade Federal do Rio de Janeiro",
"!francesurf": "FranceSurf",
"!urban": "Urban Dictionary",
"!anagram": "One Across",
"!proofwiki": "ProofWiki",
"!omgubuntu": "OMG! Ubuntu!",
"!pypi": "Pypi.python.org",
"!d20pfsrd": "Pathfinder SRD",
"!lxr": "Linux Cross Reference",
"!fsfe": "Free Software Foundation Europe",
"!wiktes": "Wikcionario",
"!redbox": "RedBox",
"!dbugs": "Debian Bugs",
"!nbsdman": "NetBSD manual pages",
"!rso": "R questions on Stack Overflow",
"!clubic": "Clubic",
"!karmadecay": "Karma Decay",
"!gethuman": "GetHuman",
"!openprocessing": "OpenProcessing",
"!cocoa": "Apple Developer",
"!rdns": "Reverse DNS",
"!liberia": "Libreria Universitaria",
"!gmx": "Google MX",
"!gmy": "Google MY",
"!ein": "ebay.in",
"!thestar": "The Star",
"!demonoid": "Demonoid",
"!erowid": "Erowid",
"!eie": "ebay.ie",
"!hastane": "Hastane",
"!shutterstock": "Shutterstock",
"!eit": "ebay.it",
"!mc": "Metacritic",
"!wetter": "Wetter.com",
"!peeron": "Peeron",
"!smh": "SMH.com.au",
"!smg": "Search My Gmail",
"!start": "START",
"!golang": "Go Programming Language",
"!edgart": "Edgar",
"!onr": "Oh No Robot",
"!ficly": "ficly",
"!rs4": "rs.4chan.org",
"!argos": "Argos",
"!gsg": "Google SG",
"!speed": "Speed.cd",
"!gsc": "Google Scholar",
"!khan": "Khan Academic",
"!elreg": "The Register",
"!gsl": "Google SL",
"!gsk": "Google SK",
"!mediawiki": "MediaWiki",
"!openports": "OpenPorts",
"!bacon": "Oracle of Bacon",
"!raamattu": "Koivuniemen Raamattuhaku",
"!pci": "http://www.pcinpact.com",
"!googleimages": "Google Images",
"!uncyclopedia": "Uncyclopedia",
"!howstuffworks": "How Stuff Works",
"!wolframalpha": "WolframAlpha",
"!circuitcity": "Circuit City",
"!esvonline": "ESVonline",
"!bulba": "Bulbapedia",
"!findlunchin": "FindLunchIn",
"!scrabblelookup": "scrabblelookup",
"!ipt": "IP Torrents",
"!filehippo": "FileHippo",
"!urbandictionary": "Urban Dictionary",
"!ptv": "PipocasTV",
"!mcwiki": "Minecraft Wiki",
"!wikipaintings": "wikipaintings",
"!theregister": "the register",
"!ebau": "ebay.com.au",
"!linguee": "Linguee",
"!disney": "Disney.com",
"!waffles": "Waffles",
"!wpplugins": "WordPress.org Plugins",
"!airbnb": "airbnb",
"!walmart": "WalMart",
"!longurl": "LongURL.org",
"!iafd": "internet adult film database",
"!salix": "SalixOS",
"!wsv": "Wikipedia SV",
"!activestate": "ActiveState.com",
"!scholar": "Google Scholar",
"!wsl": "Wikipedia SL",
"!bbcfood": "BBC Food",
"!wsj": "The Wall Street Journal",
"!norsk": "Norskordbok | UiO",
"!nicolinux": "Le blog de Nicolinux",
"!openbsd": "OpenBSD",
"!magiccards": "Magiccards.info",
"!euk": "ebay.co.uk",
"!tig": "TIGSource",
"!hp": "HP",
"!kcls": "King County Library System"
}
| mit | -1,062,460,377,060,173,600 | 30.48229 | 71 | 0.556215 | false |
CRS-support/ftw | test/integration/test_logcontains.py | 1 | 1026 | from ftw import logchecker, testrunner
import pytest
import random
class LoggerTestObj(logchecker.LogChecker):
def __init__(self):
self.do_nothing = False
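    # Emits one fake log line per call (or none when do_nothing is set).
    # `self.start` is inherited from logchecker.LogChecker and is presumably
    # filled in by the test runner with the stage start time.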
def generate_random_logs(self):
if self.do_nothing:
return []
else:
return [str(self.start) + ' rule-id-' +
str(random.randint(10, 99))]
def get_logs(self):
logs = self.generate_random_logs()
return logs
@pytest.fixture
def logchecker_obj():
"""
    Returns a LoggerTestObj integration fixture
"""
return LoggerTestObj()
def test_logcontains_withlog(logchecker_obj, ruleset, test):
runner = testrunner.TestRunner()
for stage in test.stages:
runner.run_stage(stage, logchecker_obj)
def test_logcontains_nolog(logchecker_obj, ruleset, test):
logchecker_obj.do_nothing = True
runner = testrunner.TestRunner()
with(pytest.raises(AssertionError)):
for stage in test.stages:
runner.run_stage(stage, logchecker_obj)
| apache-2.0 | -6,374,568,604,469,735,000 | 24.02439 | 60 | 0.6423 | false |
linjinjin123/leetcode | 剑指offer/平衡二叉树.py | 1 | 1099 | # Problem description
# Input a binary tree and determine whether it is a balanced binary tree.
# -*- coding:utf-8 -*-
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def IsBalanced_Solution(self, pRoot):
# write code here
if pRoot == None: return True
return abs(self.get_depth(pRoot.left) - self.get_depth(pRoot.right)) <= 1 \
and self.IsBalanced_Solution(pRoot.left) \
and self.IsBalanced_Solution(pRoot.right)
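    # Iterative tree height: depth-first traversal with an explicit stack of
    # (node, depth) pairs, keeping the maximum depth seen. Note that
    # IsBalanced_Solution recomputes subtree depths on every call, so the
    # overall check can degrade to O(n^2) on a skewed tree.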
def get_depth(self, root):
if root == None: return 0
stack = []
depth = 1
max_depth = 1
while root != None or len(stack) > 0:
if root != None:
stack.append([root, depth])
root = root.left
depth += 1
else:
root, depth = stack.pop()
if depth > max_depth:
max_depth = depth
root = root.right
depth += 1
return max_depth | mit | -3,442,280,454,682,868,000 | 28.885714 | 83 | 0.480383 | false |
samj1912/picard | picard/ui/tagsfromfilenames.py | 1 | 5318 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
import os.path
from PyQt5 import QtCore, QtWidgets
from picard import config
from picard.ui.util import StandardButton
from picard.ui import PicardDialog
from picard.ui.ui_tagsfromfilenames import Ui_TagsFromFileNamesDialog
from picard.util.tags import display_tag_name
class TagsFromFileNamesDialog(PicardDialog):
defaultsize = QtCore.QSize(560, 400)
options = [
config.TextOption("persist", "tags_from_filenames_format", ""),
]
def __init__(self, files, parent=None):
super().__init__(parent)
self.ui = Ui_TagsFromFileNamesDialog()
self.ui.setupUi(self)
items = [
"%artist%/%album%/%title%",
"%artist%/%album%/%tracknumber% %title%",
"%artist%/%album%/%tracknumber% - %title%",
"%artist%/%album% - %tracknumber% - %title%",
"%artist% - %album%/%title%",
"%artist% - %album%/%tracknumber% %title%",
"%artist% - %album%/%tracknumber% - %title%",
]
tff_format = config.persist["tags_from_filenames_format"]
if tff_format not in items:
selected_index = 0
if tff_format:
items.insert(0, tff_format)
else:
selected_index = items.index(tff_format)
self.ui.format.addItems(items)
self.ui.format.setCurrentIndex(selected_index)
self.ui.buttonbox.addButton(StandardButton(StandardButton.OK), QtWidgets.QDialogButtonBox.AcceptRole)
self.ui.buttonbox.addButton(StandardButton(StandardButton.CANCEL), QtWidgets.QDialogButtonBox.RejectRole)
self.ui.buttonbox.accepted.connect(self.accept)
self.ui.buttonbox.rejected.connect(self.reject)
self.ui.preview.clicked.connect(self.preview)
self.ui.files.setHeaderLabels([_("File Name")])
self.files = files
self.items = []
for file in files:
item = QtWidgets.QTreeWidgetItem(self.ui.files)
item.setText(0, os.path.basename(file.filename))
self.items.append(item)
self._tag_re = re.compile(r"(%\w+%)")
self.numeric_tags = ('tracknumber', 'totaltracks', 'discnumber', 'totaldiscs')
def parse_response(self):
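        # Turns the user-supplied naming format into a regex: each %tag%
        # placeholder becomes a named capture group (digits for numeric tags,
        # a date-like pattern for %date%, anything except '/' otherwise) and
        # the literal text in between is escaped. Illustrative example:
        #   "%artist%/%album%/%title%" ->
        #   (?:^|/)(?P<artist>[^/]*?)/(?P<album>[^/]*?)/(?P<title>[^/]*?)\.(\w+)$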
tff_format = self.ui.format.currentText()
columns = []
format_re = ['(?:^|/)']
for part in self._tag_re.split(tff_format):
if part.startswith('%') and part.endswith('%'):
name = part[1:-1]
columns.append(name)
if name in self.numeric_tags:
format_re.append('(?P<' + name + r'>\d+)')
                elif name in ('date',):
format_re.append('(?P<' + name + r'>\d+(?:-\d+(?:-\d+)?)?)')
else:
format_re.append('(?P<' + name + '>[^/]*?)')
else:
format_re.append(re.escape(part))
format_re.append(r'\.(\w+)$')
format_re = re.compile("".join(format_re))
return format_re, columns
def match_file(self, file, tff_format):
match = tff_format.search(file.filename.replace('\\','/'))
if match:
result = {}
for name, value in match.groupdict().items():
value = value.strip()
if name in self.numeric_tags:
value = value.lstrip("0")
if self.ui.replace_underscores.isChecked():
value = value.replace('_', ' ')
result[name] = value
return result
else:
return {}
def preview(self):
tff_format, columns = self.parse_response()
self.ui.files.setHeaderLabels([_("File Name")] + list(map(display_tag_name, columns)))
for item, file in zip(self.items, self.files):
matches = self.match_file(file, tff_format)
for i, column in enumerate(columns):
item.setText(i + 1, matches.get(column, ''))
self.ui.files.header().resizeSections(QtWidgets.QHeaderView.ResizeToContents)
self.ui.files.header().setStretchLastSection(True)
def accept(self):
tff_format, columns = self.parse_response()
for file in self.files:
metadata = self.match_file(file, tff_format)
for name, value in metadata.items():
file.metadata[name] = value
file.update()
config.persist["tags_from_filenames_format"] = self.ui.format.currentText()
super().accept()
| gpl-2.0 | 7,357,525,650,345,461,000 | 40.523438 | 113 | 0.598307 | false |
jplusplus/thenmap-v0 | generators/utils/guess-nation-codes.py | 1 | 2869 | # coding=utf-8
#Try and create nation codes (class names) from nation names in a csv
import csv
import argparse
import os.path
import sys
import shlex
#Check if file exists
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
#Define command line arguments
parser = argparse.ArgumentParser(description='Try and create Thenmap nation codes (typically two letter iso codes) from nation names in a csv.')
#Input file
parser.add_argument("-i", "--input", dest="infile", required=True,
help="input file", metavar="FILE",
type=lambda x: is_valid_file(parser,x))
#Output file
parser.add_argument("-o", "--output", dest="outfile",
help="output file", metavar="FILE")
#Column
parser.add_argument("-c", "--column", dest="column",
help="column to search and replace, starting from 0", type=int, default=0)
args = parser.parse_args()
inputFile = args.infile #"/home/leo/Världen/demo/patents/raw-pre.csv"
if args.outfile is None:
print "No output file given. Really overwrite input file? [y/N]"
choice = raw_input().lower()
if not choice in ('y', 'yes'):
sys.exit()
outputFile = inputFile
else:
if os.path.isfile(args.outfile):
print "File %s already exists. Overwrite? [y/N]" % args.outfile
choice = raw_input().lower()
if not choice in ('y', 'yes'):
sys.exit()
outputFile = args.outfile
indataColumn = args.column
outdataColumn = indataColumn
keyDict = {}
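#Assumed layout of nation-keys.csv, inferred from the loop below:
#column 0 = Thenmap nation code, 1 = Swedish name, 2 = English name,
#3 = quoted comma-separated aliases, 4 = ISO alpha-3, 5 = OECD-style label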
try:
with open('nation-keys.csv', 'rb') as csvfile:
keyreader = csv.reader(csvfile,delimiter=',',quotechar='"')
for row in keyreader:
#Swedish name -> code
if row[1]:
keyDict[row[1]] = row[0]
#English name -> code
if row[2]:
keyDict[row[2]] = row[0]
#Alisases -> code
if row[3]:
#Use csv module to split string by comma, respecting quotes
aliases = csv.reader([row[3]],skipinitialspace=True)
for a in aliases.next():
keyDict[a] = row[0]
#ISO alpha 3 ("CHE")
if row[4]:
keyDict[row[4]] = row[0]
#OECD ("CHE: Switzerland")
if row[5]:
keyDict[row[5]] = row[0]
except IOError:
print ("Could not open key file")
#print keyDict
outdata = []
try:
with open(inputFile, 'rb') as csvfile:
datacsv = csv.reader(csvfile,delimiter=',',quotechar='"')
firstRow = True
for row in datacsv:
if firstRow:
firstRow = False
else:
nationname = row[indataColumn].strip()
if nationname in keyDict:
row[outdataColumn] = keyDict[nationname]
else:
print "Could not find %s" % nationname
outdata.append(row)
try:
with open(outputFile, 'wb') as csvfile:
writer = csv.writer(csvfile,delimiter=',',quotechar='"')
for row in outdata:
writer.writerow(row)
except IOError:
print ("Could not open output file")
except IOError:
print ("Could not open input file")
| gpl-2.0 | -8,708,814,281,626,982,000 | 24.837838 | 144 | 0.664226 | false |
tensorflow/lingvo | lingvo/core/task_scheduler.py | 1 | 11531 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-task task sampling schedules."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import early_stop
import numpy as np
class TaskScheduler(base_layer.BaseLayer):
"""Generic multi-task scheduler.
Subclasses should override the `Sample` method to return a task string given
a step. All of the task strings as well as additional hyperparameters needed
by `Sample` should be exposed and stored in the params. `Sample` should also
update `cur_probs`.
"""
@classmethod
def Params(cls):
"""Parameters for this task scheduler."""
p = super().Params()
p.name = 'task_scheduler'
return p
def __init__(self, params):
super().__init__(params)
self.cur_probs = None
self.SetVariableFree()
def Sample(self, current_step):
raise NotImplementedError('Abstract method')
class AdaptiveScheduler(TaskScheduler):
"""Tasks with low scores will be sampled more often.
Scores are expected to be non-negative. Larger scores are better."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('tasks', [], 'List of tasks')
p.Define('expected', [], 'List of final expected scores')
p.Define('mh_a', early_stop.MetricHistory.Params(), '')
p.Define('mh_b', early_stop.MetricHistory.Params(), '')
p.Define(
        'epsilon', 0.05, 'Regularization term. A large epsilon will lead '
'to a more uniform task distribution.')
p.Define('alpha', 1.0, 'Normalized task scores are raised to this power.')
return p
def __init__(self, params):
super().__init__(params)
if len(self.params.tasks) != 2 or len(self.params.expected) != 2:
raise ValueError('Only two tasks are supported by this scheduler.')
if self.params.epsilon < 0:
raise ValueError('Epsilon should be positive.')
self.tasks = self.params.tasks
self.last_scores = [0.0] * 2
self._metric_histories = [
early_stop.MetricHistory(self.params.mh_a),
early_stop.MetricHistory(self.params.mh_b)
]
def getMetricHistories(self):
# If too slow, consider another implementation.
# TODO(sebjean) Time file reading and change behaviour if too long.
for index, mh in enumerate(self._metric_histories):
try:
with tf.io.gfile.GFile(mh.hist_file) as f:
lines = f.readlines()
except tf.errors.NotFoundError:
tf.logging.warning('File not found. '
'Expected at start of training only.')
score, lines = 0.0, []
if lines:
try:
score = lines[-1].split()[-1]
except IndexError:
tf.logging.warning(
'IndexError. Your history file may be corrupted.')
score = 0.0
self.last_scores[index] = float(score)
class SimpleAdaptiveScheduler(AdaptiveScheduler):
"""Simple adaptive scheduler.
A task with a normalized score of `s` is approximately weighted as `1 - s`.
"""
def Sample(self, current_step):
"""Sample a task.
    The unnormalized probability of a task is given by
1 + epsilon - min(1, score / expected)**alpha.
Args:
current_step: Unused.
Returns:
str, the name of the sampled task.
"""
del current_step # Unused
self.getMetricHistories()
alpha, eps = self.params.alpha, self.params.epsilon
probs = [
1 + eps - min(1, score / self.params.expected[index])**alpha
for index, score in enumerate(self.last_scores)
]
probs = tuple(probs / np.sum(probs))
sampled_task = np.random.choice(self.params.tasks, p=probs)
self.cur_probs = probs
return sampled_task
class InverseRatioAdaptiveScheduler(AdaptiveScheduler):
"""Inverse ratio adaptive scheduler.
  Tasks are approximately weighted as the inverse of their normalized scores.
"""
def Sample(self, current_step):
"""Sample a task.
    The unnormalized probability of a task is given by
1 / (min(1, score / expected)**alpha + epsilon)
Args:
current_step: Unused.
Returns:
str, the name of the sampled task.
"""
del current_step # Unused
self.getMetricHistories()
alpha, eps = self.params.alpha, self.params.epsilon
probs = [
1.0 / (min(1, score / self.params.expected[index])**alpha + eps)
for index, score in enumerate(self.last_scores)
]
probs = tuple(probs / np.sum(probs))
sampled_task = np.random.choice(self.params.tasks, p=probs)
self.cur_probs = probs
return sampled_task
class ShiftedExponentialScheduler(TaskScheduler):
"""The unnormalized score of each task follows a shifted exponential function.
Generalizes the constant, exponential and sigmoid
schedules described in "Scheduled Multi-Task Learning: From Syntax to
Translation" (Kiperwasser and Ballesteros).
https://arxiv.org/pdf/1804.08915.pdf
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'alpha', 0, 'Controls the rate at which the schedule changes. '
'A large alpha will lead to fast convergence toward final values.')
p.Define(
        'task_probs', [], 'List of 2-tuples (task, prob). For non-constant '
'schedulers, prob is a tuple of the form (init_prob, final_prob).')
return p
def __init__(self, params):
super().__init__(params)
assert isinstance(self.params.task_probs, list)
self.tasks = []
self._descriptors = []
def Sample(self, current_step):
"""Sample a task.
Given an input [a, b] and a rate `alpha`, the unnormalized
    score of each task is a + b * exp(-alpha * t).
Args:
current_step: int. Current time step.
Returns:
str, the name of the sampled task.
"""
probs = [
a + b * np.exp(-self.params.alpha * current_step)
for a, b in self._descriptors
]
probs = tuple(probs / np.sum(probs))
sampled_task = np.random.choice(self.tasks, p=probs)
self.cur_probs = probs
return sampled_task
class ConstantScheduler(ShiftedExponentialScheduler):
"""Constant schedule. Tasks are sampled from a fixed probability distribution.
"""
def __init__(self, params):
super().__init__(params)
for key, value in self.params.task_probs:
self.tasks.append(key)
self._descriptors.append((value, 0))
class ExponentialScheduler(ShiftedExponentialScheduler):
"""Exponential schedule.
For a task with initial and final probabilities p_0 and p_1 respectively,
its unnormalized score is given by
`p_1 + (p_0 - p_1) * exp(-alpha * current_step)`.
"""
def __init__(self, params):
super().__init__(params)
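    # Each descriptor stores (a, b) with a = p_1 and b = p_0 - p_1, so the
    # unnormalized score starts at p_0 and decays toward p_1.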
for key, value in self.params.task_probs:
self.tasks.append(key)
self._descriptors.append((value[1], value[0] - value[1]))
class SigmoidScheduler(ShiftedExponentialScheduler):
"""Sigmoid schedule.
For a task with initial and final probabilities p_0 and p_1 respectively,
its unnormalized score is given by
`p_1 + (2 * p_0 - p_1) * exp(-alpha * current_step)`.
"""
def __init__(self, params):
super().__init__(params)
for key, value in self.params.task_probs:
self.tasks.append(key)
self._descriptors.append((value[1], 2 * value[0] - value[1]))
class RoundRobinScheduler(TaskScheduler):
"""Deterministic sequential schedule."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('tasks', [], 'List of task names. No repetitions allowed.')
return p
def __init__(self, params):
super().__init__(params)
assert isinstance(self.params.tasks, list)
self.tasks = sorted(self.params.tasks)
self.n_tasks = len(self.tasks)
self.cur_probs = [1. / self.n_tasks] * self.n_tasks # For summary
self.next_task_idx = 0
def Sample(self, current_step):
"""Sample a task."""
sampled_task = self.tasks[self.next_task_idx]
self.next_task_idx = (self.next_task_idx + 1) % self.n_tasks
return sampled_task
class SequentialScheduler(TaskScheduler):
"""Deterministic schedule that stays a fixed number of steps on each task."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'task_steps', [], 'List of tuples of (task_name, steps_for_task). Goes '
'through list sequentially in the specified order, staying '
'steps_for_task steps on task_name. On completing the schedule, '
'remains on the final task for the rest of the time. Assumes '
'p.task_global_step is False.')
return p
def __init__(self, params):
super().__init__(params)
assert isinstance(self.params.task_steps, list)
assert self.params.task_steps
self.task_steps = []
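    # Convert the per-task step counts into cumulative boundaries: each entry
    # records the global step at which its task ends.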
for (name, steps) in self.params.task_steps:
assert steps > 0
if self.task_steps:
self.task_steps.append((name, steps + self.task_steps[-1][1]))
else:
self.task_steps.append((name, steps))
self.n_tasks = len(self.task_steps)
self.task_idx = 0
self.cur_probs = [1] + [0] * (self.n_tasks - 1) # For summary
def Sample(self, current_step):
"""Sample a task."""
sampled_task, to_step = self.task_steps[self.task_idx]
if current_step >= to_step and self.task_idx < self.n_tasks - 1:
self.task_idx += 1
sampled_task = self.task_steps[self.task_idx][0]
self.cur_probs[self.task_idx - 1] = 0
self.cur_probs[self.task_idx] = 1
return sampled_task
class PieceWiseScheduler(TaskScheduler):
"""Piecewise scheduler using different scheduling strategies."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'schedule_steps', [], 'List of tuples of (schedule_class_params, '
'number of steps to use this schedule class)')
return p
def __init__(self, params):
super().__init__(params)
assert isinstance(self.params.schedule_steps, list)
self.schedule_steps = []
self.schedule_params = []
for (cls_params, steps) in self.params.schedule_steps:
if self.schedule_steps:
self.schedule_steps.append(steps + self.schedule_steps[-1])
else:
self.schedule_steps.append(steps)
self.schedule_params.append(cls_params)
self.CreateChildren('schedules', self.schedule_params)
self.n_schedules = len(self.schedule_steps)
self.schedule_idx = 0
self.task_step_offset = 0
self.cur_probs = self.schedules[0].cur_probs
def Sample(self, current_step):
"""Sample a task."""
to_step = self.schedule_steps[self.schedule_idx]
if current_step >= to_step and self.schedule_idx < self.n_schedules - 1:
self.task_step_offset = to_step
self.schedule_idx += 1
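    # Each child schedule sees steps relative to its own start, hence the
    # offset accumulated from the schedules already completed.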
cur_schedule = self.schedules[self.schedule_idx]
sampled_task = cur_schedule.Sample(current_step - self.task_step_offset)
self.cur_probs = cur_schedule.cur_probs
return sampled_task
| apache-2.0 | -3,984,654,347,267,109,000 | 30.164865 | 80 | 0.652155 | false |
daevaorn/sentry | src/sentry/models/group.py | 1 | 11113 | """
sentry.models.group
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import math
import six
import time
import warnings
from base64 import b16decode, b16encode
from datetime import timedelta
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.app import buffer
from sentry.constants import (
DEFAULT_LOGGER_NAME, LOG_LEVELS, MAX_CULPRIT_LENGTH, EVENT_ORDERING_KEY,
)
from sentry.db.models import (
BaseManager, BoundedIntegerField, BoundedPositiveIntegerField,
FlexibleForeignKey, Model, GzippedDictField, sane_repr
)
from sentry.utils.http import absolute_uri
from sentry.utils.strings import truncatechars, strip
# TODO(dcramer): pull in enum library
class GroupStatus(object):
UNRESOLVED = 0
RESOLVED = 1
MUTED = 2
PENDING_DELETION = 3
DELETION_IN_PROGRESS = 4
PENDING_MERGE = 5
class GroupManager(BaseManager):
use_for_related_fields = True
def from_kwargs(self, project, **kwargs):
from sentry.event_manager import EventManager
manager = EventManager(kwargs)
manager.normalize()
return manager.save(project)
def add_tags(self, group, tags):
from sentry.models import TagValue, GroupTagValue
project_id = group.project_id
date = group.last_seen
for tag_item in tags:
if len(tag_item) == 2:
(key, value), data = tag_item, None
else:
key, value, data = tag_item
buffer.incr(TagValue, {
'times_seen': 1,
}, {
'project_id': project_id,
'key': key,
'value': value,
}, {
'last_seen': date,
'data': data,
})
buffer.incr(GroupTagValue, {
'times_seen': 1,
}, {
'group_id': group.id,
'project_id': project_id,
'key': key,
'value': value,
}, {
'last_seen': date,
})
class Group(Model):
"""
Aggregated message which summarizes a set of Events.
"""
__core__ = False
project = FlexibleForeignKey('sentry.Project', null=True)
logger = models.CharField(
max_length=64, blank=True, default=DEFAULT_LOGGER_NAME, db_index=True)
level = BoundedPositiveIntegerField(
choices=LOG_LEVELS.items(), default=logging.ERROR, blank=True,
db_index=True)
message = models.TextField()
culprit = models.CharField(
max_length=MAX_CULPRIT_LENGTH, blank=True, null=True,
db_column='view')
num_comments = BoundedPositiveIntegerField(default=0, null=True)
platform = models.CharField(max_length=64, null=True)
status = BoundedPositiveIntegerField(default=0, choices=(
(GroupStatus.UNRESOLVED, _('Unresolved')),
(GroupStatus.RESOLVED, _('Resolved')),
(GroupStatus.MUTED, _('Muted')),
), db_index=True)
times_seen = BoundedPositiveIntegerField(default=1, db_index=True)
last_seen = models.DateTimeField(default=timezone.now, db_index=True)
first_seen = models.DateTimeField(default=timezone.now, db_index=True)
first_release = FlexibleForeignKey('sentry.Release', null=True,
on_delete=models.PROTECT)
resolved_at = models.DateTimeField(null=True, db_index=True)
# active_at should be the same as first_seen by default
active_at = models.DateTimeField(null=True, db_index=True)
time_spent_total = BoundedIntegerField(default=0)
time_spent_count = BoundedIntegerField(default=0)
score = BoundedIntegerField(default=0)
is_public = models.NullBooleanField(default=False, null=True)
data = GzippedDictField(blank=True, null=True)
objects = GroupManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_groupedmessage'
verbose_name_plural = _('grouped messages')
verbose_name = _('grouped message')
permissions = (
("can_view", "Can view"),
)
index_together = (
('project', 'first_release'),
)
__repr__ = sane_repr('project_id')
def __unicode__(self):
return "(%s) %s" % (self.times_seen, self.error())
def save(self, *args, **kwargs):
if not self.last_seen:
self.last_seen = timezone.now()
if not self.first_seen:
self.first_seen = self.last_seen
if not self.active_at:
self.active_at = self.first_seen
if self.message:
# We limit what we store for the message body
self.message = self.message.splitlines()[0][:255]
super(Group, self).save(*args, **kwargs)
def get_absolute_url(self):
return absolute_uri(reverse('sentry-group', args=[
self.organization.slug, self.project.slug, self.id]))
@property
def event_set(self):
from sentry.models import Event
return Event.objects.filter(group_id=self.id)
@property
def avg_time_spent(self):
if not self.time_spent_count:
return
return float(self.time_spent_total) / self.time_spent_count
def is_over_resolve_age(self):
resolve_age = self.project.get_option('sentry:resolve_age', None)
if not resolve_age:
return False
return self.last_seen < timezone.now() - timedelta(hours=int(resolve_age))
def is_muted(self):
return self.get_status() == GroupStatus.MUTED
def is_resolved(self):
return self.get_status() == GroupStatus.RESOLVED
def get_status(self):
# XXX(dcramer): GroupSerializer reimplements this logic
from sentry.models import GroupSnooze
if self.status == GroupStatus.MUTED:
try:
snooze = GroupSnooze.objects.get(group=self)
except GroupSnooze.DoesNotExist:
pass
else:
# XXX(dcramer): if the snooze row exists then we need
				# to confirm it's still valid
if snooze.until > timezone.now():
return GroupStatus.MUTED
else:
return GroupStatus.UNRESOLVED
if self.status == GroupStatus.UNRESOLVED and self.is_over_resolve_age():
return GroupStatus.RESOLVED
return self.status
def get_share_id(self):
return b16encode('{}.{}'.format(self.project_id, self.id)).lower()
@classmethod
def from_share_id(cls, share_id):
try:
project_id, group_id = b16decode(share_id.upper()).split('.')
except ValueError:
raise cls.DoesNotExist
return cls.objects.get(project=project_id, id=group_id)
def get_score(self):
return int(math.log(self.times_seen) * 600 + float(time.mktime(self.last_seen.timetuple())))
def get_latest_event(self):
from sentry.models import Event
if not hasattr(self, '_latest_event'):
latest_events = sorted(
Event.objects.filter(
group_id=self.id,
).order_by('-datetime')[0:5],
key=EVENT_ORDERING_KEY,
reverse=True,
)
try:
self._latest_event = latest_events[0]
except IndexError:
self._latest_event = None
return self._latest_event
def get_oldest_event(self):
from sentry.models import Event
if not hasattr(self, '_oldest_event'):
oldest_events = sorted(
Event.objects.filter(
group_id=self.id,
).order_by('datetime')[0:5],
key=EVENT_ORDERING_KEY,
)
try:
self._oldest_event = oldest_events[0]
except IndexError:
self._oldest_event = None
return self._oldest_event
def get_unique_tags(self, tag, since=None, order_by='-times_seen'):
# TODO(dcramer): this has zero test coverage and is a critical path
from sentry.models import GroupTagValue
queryset = GroupTagValue.objects.filter(
group=self,
key=tag,
)
if since:
queryset = queryset.filter(last_seen__gte=since)
return queryset.values_list(
'value',
'times_seen',
'first_seen',
'last_seen',
).order_by(order_by)
def get_tags(self, with_internal=True):
from sentry.models import GroupTagKey, TagKey
if not hasattr(self, '_tag_cache'):
group_tags = GroupTagKey.objects.filter(
group=self,
project=self.project,
)
if not with_internal:
group_tags = group_tags.exclude(key__startswith='sentry:')
group_tags = list(group_tags.values_list('key', flat=True))
tag_keys = dict(
(t.key, t)
for t in TagKey.objects.filter(
project=self.project,
key__in=group_tags
)
)
results = []
for key in group_tags:
try:
tag_key = tag_keys[key]
except KeyError:
label = key.replace('_', ' ').title()
else:
label = tag_key.get_label()
results.append({
'key': key,
'label': label,
})
self._tag_cache = sorted(results, key=lambda x: x['label'])
return self._tag_cache
def error(self):
return self.message
error.short_description = _('error')
def has_two_part_message(self):
message = strip(self.message)
return '\n' in message or len(message) > 100
@property
def title(self):
culprit = strip(self.culprit)
if culprit:
return culprit
return self.message
@property
def message_short(self):
message = strip(self.message)
if not message:
message = '<unlabeled message>'
else:
message = truncatechars(message.splitlines()[0], 100)
return message
@property
def organization(self):
return self.project.organization
@property
def team(self):
return self.project.team
@property
def checksum(self):
warnings.warn('Group.checksum is no longer used', DeprecationWarning)
return ''
def get_email_subject(self):
return '[%s] %s: %s' % (
self.project.get_full_name().encode('utf-8'),
six.text_type(self.get_level_display()).upper().encode('utf-8'),
self.message_short.encode('utf-8')
)
| bsd-3-clause | -4,370,704,412,550,633,000 | 30.751429 | 100 | 0.573473 | false |
gautamMalu/rootfs_xen_arndale | usr/share/python/pyversions.py | 1 | 15106 | #! /usr/bin/python
import os, re, sys
try:
SetType = set
except NameError:
import sets
SetType = sets.Set
set = sets.Set
_defaults = None
def read_default(name=None):
global _defaults
from ConfigParser import SafeConfigParser, NoOptionError
if not _defaults:
if os.path.exists('/usr/share/python/debian_defaults'):
config = SafeConfigParser()
try:
config.readfp(file('/usr/share/python/debian_defaults'))
except IOError, msg:
print msg
sys.exit(1)
_defaults = config
if _defaults and name:
try:
value = _defaults.get('DEFAULT', name)
except NoOptionError:
raise ValueError
return value
return None
def parse_versions(vstring, add_exact=False):
import operator
operators = { None: operator.eq, '=': operator.eq,
'>=': operator.ge, '<=': operator.le,
'<<': operator.lt
}
vinfo = {}
exact_versions = set([])
version_range = set(supported_versions(version_only=True)
+ old_versions(version_only=True))
relop_seen = False
for field in vstring.split(','):
field = field.strip()
if field == 'all':
vinfo['all'] = 'all'
continue
if field in ('current', 'current_ext'):
vinfo['current'] = field
continue
vinfo.setdefault('versions', set())
ve = re.compile('(>=|<=|<<|=)? *(\d\.\d)$')
m = ve.match(field)
try:
if not m:
raise ValueError('error parsing Python-Version attribute')
op, v = m.group(1), m.group(2)
vmaj, vmin = v.split('.')
# Don't silently ignore Python 3 versions.
if int(vmaj) > 2:
raise ValueError('error parsing Python-Version attribute, Python 3 version found')
if op in (None, '='):
exact_versions.add(v)
else:
relop_seen = True
filtop = operators[op]
version_range = [av for av in version_range if filtop(av ,v)]
except Exception:
raise ValueError, 'error parsing Python-Version attribute'
if add_exact:
if exact_versions:
vinfo['vexact'] = exact_versions
if 'versions' in vinfo:
if relop_seen:
vinfo['versions'] = set(version_range)
else:
del vinfo['versions']
else:
if 'versions' in vinfo:
vinfo['versions'] = exact_versions
if relop_seen:
vinfo['versions'] = exact_versions.union(version_range)
return vinfo
_old_versions = None
def old_versions(version_only=False):
global _old_versions
if not _old_versions:
try:
value = read_default('old-versions')
_old_versions = [s.strip() for s in value.split(',')]
except ValueError:
_old_versions = []
if version_only:
return [v[6:] for v in _old_versions]
else:
return _old_versions
_unsupported_versions = None
def unsupported_versions(version_only=False):
global _unsupported_versions
if not _unsupported_versions:
try:
value = read_default('unsupported-versions')
_unsupported_versions = [s.strip() for s in value.split(',')]
except ValueError:
_unsupported_versions = []
if version_only:
return [v[6:] for v in _unsupported_versions]
else:
return _unsupported_versions
_supported_versions = ["python%s" % ver.strip() for ver in
os.environ.get('DEBPYTHON_SUPPORTED', '').split(',')
if ver.strip()]
def supported_versions(version_only=False):
global _supported_versions
if not _supported_versions:
try:
value = read_default('supported-versions')
_supported_versions = [s.strip() for s in value.split(',')]
except ValueError:
cmd = ['/usr/bin/apt-cache', '--no-all-versions',
'show', 'python-all']
try:
import subprocess
p = subprocess.Popen(cmd, bufsize=1,
shell=False, stdout=subprocess.PIPE)
fd = p.stdout
except ImportError:
fd = os.popen(' '.join(cmd))
depends = None
for line in fd:
if line.startswith('Depends:'):
depends = line.split(':', 1)[1].strip().split(',')
fd.close()
if depends:
depends = [re.sub(r'\s*(\S+)[ (]?.*', r'\1', s) for s in depends]
_supported_versions = depends
if not _supported_versions:
# last resort: python-minimal not installed, apt-cache
# not available, hard code the value, #394084
_supported_versions = ['python2.6', 'python2.7']
if version_only:
return [v[6:] for v in _supported_versions]
else:
return _supported_versions
_default_version = "python%s" % os.environ.get('DEBPYTHON_DEFAULT', '')
if _default_version == 'python':
_default_version = None
def default_version(version_only=False):
global _default_version
if not _default_version:
try:
_default_version = link = os.readlink('/usr/bin/python')
except OSError:
_default_version = None
try:
cmd = ['/usr/bin/python', '-c', 'import sys; print sys.version[:3]']
import subprocess
p = subprocess.Popen(cmd, bufsize=1,
shell=False, stdout=subprocess.PIPE)
fd = p.stdout
except ImportError:
fd = os.popen("/usr/bin/python -c 'import sys; print sys.version[:3]'")
line = fd.readline().strip()
fd.close()
if re.match(r'\d\.\d$', line):
_default_version = 'python' + line
# consistency check
try:
debian_default = read_default('default-version')
except ValueError:
debian_default = "python2.7"
if not _default_version in (debian_default, os.path.join('/usr/bin', debian_default)):
raise ValueError, "/usr/bin/python does not match the python default version. It must be reset to point to %s" % debian_default
_default_version = debian_default
if version_only:
return _default_version[6:]
else:
return _default_version
def requested_versions(vstring, version_only=False):
versions = None
vinfo = parse_versions(vstring, add_exact=True)
supported = supported_versions(version_only=True)
if len(vinfo) == 1:
if 'all' in vinfo:
versions = supported
elif 'current' in vinfo:
versions = [default_version(version_only=True)]
elif 'vexact' in vinfo:
versions = vinfo['vexact']
else:
versions = vinfo['versions'].intersection(supported)
elif 'all' in vinfo and 'current' in vinfo:
raise ValueError, "both `current' and `all' in version string"
elif 'all' in vinfo:
if 'versions' in vinfo:
versions = vinfo['versions'].intersection(supported)
else:
versions = set(supported)
if 'vexact' in vinfo:
versions.update(vinfo['vexact'])
elif 'current' in vinfo:
current = default_version(version_only=True)
if not current in vinfo['versions']:
raise ValueError, "`current' version not in supported versions"
versions = [current]
elif 'versions' in vinfo or 'vexact' in vinfo:
versions = set()
if 'versions' in vinfo:
versions = vinfo['versions'].intersection(supported)
if 'vexact' in vinfo:
versions.update(vinfo['vexact'])
else:
raise ValueError, 'No Python versions in version string'
if not versions:
raise ValueError('computed set of supported versions is empty')
if version_only:
return versions
else:
return ['python%s' % v for v in versions]
def installed_versions(version_only=False):
import glob
supported = supported_versions()
versions = [os.path.basename(s)
for s in glob.glob('/usr/bin/python[0-9].[0-9]')
if os.path.basename(s) in supported]
versions.sort()
if version_only:
return [v[6:] for v in versions]
else:
return versions
class ControlFileValueError(ValueError):
pass
class MissingVersionValueError(ValueError):
pass
def extract_pyversion_attribute(fn, pkg):
"""read the debian/control file, extract the X-Python-Version or
XS-Python-Version field; check that XB-Python-Version exists for the
package."""
version = None
sversion = None
section = None
try:
fp = file(fn, 'r')
except IOError, msg:
print "Cannot open %s: %s" % (fn, msg)
sys.exit(2)
for line in fp:
line = line.strip()
if line == '':
if section == None:
continue
if pkg == 'Source':
break
section = None
elif line.startswith('Source:'):
section = 'Source'
elif line.startswith('Package: ' + pkg):
section = pkg
elif line.lower().startswith(('xs-python-version:', 'x-python-version:')):
if section != 'Source':
raise ValueError, \
'attribute X(S)-Python-Version not in Source section'
sversion = line.split(':', 1)[1].strip()
elif line.lower().startswith('xb-python-version:'):
if section == pkg:
version = line.split(':', 1)[1].strip()
if section == None:
raise ControlFileValueError, 'not a control file'
if pkg == 'Source':
if sversion == None:
raise MissingVersionValueError, \
'no X(S)-Python-Version in control file'
return sversion
if version == None:
raise MissingVersionValueError, \
'no XB-Python-Version for package `%s' % pkg
return version
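# Illustrative sketch (added for clarity; not part of the original script):
# the kind of debian/control content that extract_pyversion_attribute() parses.
# The package name and version constraint below are hypothetical.
_EXAMPLE_CONTROL = """\
Source: python-foo
XS-Python-Version: >= 2.6

Package: python-foo
XB-Python-Version: 2.6, 2.7
"""
# For a file with this content, extract_pyversion_attribute(fn, 'Source')
# returns '>= 2.6' and extract_pyversion_attribute(fn, 'python-foo') returns
# '2.6, 2.7'.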
# compatibility functions to parse debian/pyversions
def version_cmp(ver1,ver2):
v1=[int(i) for i in ver1.split('.')]
v2=[int(i) for i in ver2.split('.')]
return cmp(v1,v2)
def requested_versions_bis(vstring, version_only=False):
versions = []
py_supported_short = supported_versions(version_only=True)
for item in vstring.split(','):
v=item.split('-')
if len(v)>1:
if not v[0]:
v[0] = py_supported_short[0]
if not v[1]:
v[1] = py_supported_short[-1]
for ver in py_supported_short:
try:
if version_cmp(ver,v[0]) >= 0 \
and version_cmp(ver,v[1]) <= 0:
versions.append(ver)
except ValueError:
pass
else:
if v[0] in py_supported_short:
versions.append(v[0])
versions.sort(version_cmp)
if not versions:
raise ValueError, 'empty set of versions'
if not version_only:
versions=['python'+i for i in versions]
return versions
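# Illustrative examples for the debian/pyversions format (added for clarity;
# not part of the original script), assuming the supported versions are 2.6
# and 2.7:
#   requested_versions_bis('2.6-') -> ['python2.6', 'python2.7']
#   requested_versions_bis('2.7', version_only=True) -> ['2.7']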
def extract_pyversion_attribute_bis(fn):
vstring = file(fn).readline().rstrip('\n')
return vstring
def main():
from optparse import OptionParser
usage = '[-v] [-h] [-d|--default] [-s|--supported] [-i|--installed] [-r|--requested <version string>|<control file>]'
parser = OptionParser(usage=usage)
parser.add_option('-d', '--default',
help='print the default python version',
action='store_true', dest='default')
parser.add_option('-s', '--supported',
help='print the supported python versions',
action='store_true', dest='supported')
parser.add_option('-r', '--requested',
help='print the python versions requested by a build; the argument is either the name of a control file or the value of the X(S)-Python-Version attribute',
action='store_true', dest='requested')
parser.add_option('-i', '--installed',
help='print the installed supported python versions',
action='store_true', dest='installed')
parser.add_option('-v', '--version',
help='print just the version number(s)',
default=False, action='store_true', dest='version_only')
opts, args = parser.parse_args()
program = os.path.basename(sys.argv[0])
if opts.default and len(args) == 0:
try:
print default_version(opts.version_only)
except ValueError, msg:
print "%s:" % program, msg
sys.exit(1)
elif opts.supported and len(args) == 0:
print ' '.join(supported_versions(opts.version_only))
elif opts.installed and len(args) == 0:
print ' '.join(installed_versions(opts.version_only))
elif opts.requested and len(args) <= 1:
if len(args) == 0:
versions = 'debian/control'
else:
versions = args[0]
try:
if os.path.isfile(versions):
fn = versions
try:
vstring = extract_pyversion_attribute(fn, 'Source')
vs = requested_versions(vstring, opts.version_only)
except ControlFileValueError:
sys.stderr.write("%s: not a control file: %s, " \
% (program, fn))
sys.exit(1)
except MissingVersionValueError:
fn = os.path.join(os.path.dirname(fn), 'pyversions')
sys.stderr.write("%s: missing X(S)-Python-Version in control file, fall back to %s\n" \
% (program, fn))
try:
vstring = extract_pyversion_attribute_bis(fn)
vs = requested_versions_bis(vstring, opts.version_only)
except IOError:
sys.stderr.write("%s: missing debian/pyversions file, fall back to supported versions\n" \
% program)
vs = supported_versions(opts.version_only)
except ValueError, e:
sys.stderr.write("%s: %s\n" % (program, e))
sys.exit(4)
else:
vs = requested_versions(versions, opts.version_only)
print ' '.join(vs)
except ValueError, msg:
sys.stderr.write("%s: %s\n" % (program, msg))
sys.exit(1)
else:
sys.stderr.write("usage: %s %s\n" % (program, usage))
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-2.0 | -4,677,142,220,586,185,000 | 36.954774 | 177 | 0.541176 | false |
tobsan/softwarecontainer | servicetest/cgroups/test_cgroups.py | 1 | 9757 |
# Copyright (C) 2016-2017 Pelagicore AB
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES
# OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
#
# For further information see LICENSE
import pytest
import os
import time
from testframework.testhelper import CGroupHelper
from testframework import Container
from testframework import Capability
from testframework import StandardManifest
from dbus.exceptions import DBusException
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
TESTOUTPUT_DIR = CURRENT_DIR + "/testoutput"
# This function is used by the test framework to know where test specific files should be stored
def output_dir():
return TESTOUTPUT_DIR
# This function is used by the 'agent' fixture to know where the log should be stored
def logfile_path():
return TESTOUTPUT_DIR + "/test.log"
# This function is used by the testframework 'testhelper' fixture to know where the
# testhelper should be made available when running the tests
def mounted_path_in_host():
return CURRENT_DIR
"""##### Configs #####"""
# These default values are used to pass various test specific values and
# configurations to the Container helper. Tests that need to add, remove or
# update entries can simply base their dict on this one for convenience.
DATA = {
Container.CONFIG: '[{ "writeBufferEnabled": false }]',
Container.BIND_MOUNT_DIR: "/gateways/app",
Container.HOST_PATH: CURRENT_DIR,
Container.READONLY: False
}
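# Illustrative sketch (added for clarity; not part of the original tests): a
# test that needs different container settings can derive its own dict from
# DATA. EXAMPLE_READONLY_DATA is a hypothetical name and is not used by the
# tests below.
EXAMPLE_READONLY_DATA = dict(DATA)
EXAMPLE_READONLY_DATA[Container.READONLY] = True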
CONFIG_TEST_MEMORY_SMALL_THRESHOLD = [
{"setting": "memory.limit_in_bytes", "value": "1K"}
]
CONFIG_TEST_MEMORY_SHARE = [
{"setting": "memory.limit_in_bytes", "value": "1G"},
{"setting": "memory.memsw.limit_in_bytes", "value": "1G"}
]
CONFIG_TEST_MEMORY_WHITELISTING = [
{"setting": "memory.limit_in_bytes", "value": "2K"},
{"setting": "memory.limit_in_bytes", "value": "1M"},
{"setting": "memory.memsw.limit_in_bytes", "value": "10M"},
{"setting": "memory.memsw.limit_in_bytes", "value": "100K"}
]
TEST_NETCLS_VALUE = "0x10001"
CONFIG_TEST_NETCLS = [
{"setting": "net_cls.classid", "value": TEST_NETCLS_VALUE}
]
CPU_SHARES_LOW_VALUE = "2"
CONFIG_CPU_SHARES_LOW_THRESHOLD = [
{"setting": "cpu.shares", "value": CPU_SHARES_LOW_VALUE}
]
CPU_SHARES_WHITELISTING_VALUE = "750"
CONFIG_CPU_SHARES_WHITELISTING = [
{"setting": "cpu.shares", "value": "250"},
{"setting": "cpu.shares", "value": CPU_SHARES_WHITELISTING_VALUE}
]
cap_0 = Capability("test.cap.small.threshold",
[
{"id": "cgroups", "config": CONFIG_TEST_MEMORY_SMALL_THRESHOLD}
])
cap_1 = Capability("test.cap.memory.share",
[
{"id": "cgroups", "config": CONFIG_TEST_MEMORY_SHARE}
])
cap_2 = Capability("test.cap.memory.whitelist",
[
{"id": "cgroups", "config": CONFIG_TEST_MEMORY_WHITELISTING}
])
cap_3 = Capability("test.cap.netcls",
[
{"id": "cgroups", "config": CONFIG_TEST_NETCLS}
])
cap_4 = Capability("test.cap.cpu.shares.threshold",
[
{"id": "cgroups", "config": CONFIG_CPU_SHARES_LOW_THRESHOLD}
])
cap_5 = Capability("test.cap.cpu.shares.whitelist",
[
{"id": "cgroups", "config": CONFIG_CPU_SHARES_WHITELISTING}
])
manifest = StandardManifest(TESTOUTPUT_DIR,
"cgroup-test-manifest.json",
[cap_0, cap_1, cap_2, cap_3, cap_4, cap_5])
def service_manifests():
""" The agent fixture calls this function when it creates the service manifests
that should be used with this test module. The agent fixture expects a list
of StandardManifest and/or DefaultManifest objects.
"""
return [manifest]
"""##### Test suites #####"""
@pytest.mark.usefixtures("testhelper", "create_testoutput_dir", "agent")
class TestCGroupGateway(object):
""" This suite tests that whether CGroupsGateway can working with supported options or not.
Prerequisites :
CONFIG_MEMCG, CONFIG_MEMCG_SWAP and CONFIG_CGROUP_NET_CLASSID kernel options should have
been enabled for these tests. Also cgroup_enable=memory and swapaccount=1 options should be
enabled by grub.
"""
def test_memory_cgroup_small_threshold(self):
""" Test if the memory limiting with cgroup gateway is working as expected
which behavior is when allocated more memory than limit it should fail
"""
try:
sc = Container()
sc.start(DATA)
with pytest.raises(DBusException) as err:
sc.set_capabilities(["test.cap.small.threshold"])
assert err.value.get_dbus_name() == Container.DBUS_EXCEPTION_FAILED
finally:
sc.terminate()
def test_memory_cgroup_limit(self):
""" Test if the memory limiting with cgroup gateway is working as expected
which behavior is when allocated more memory than limit it should fail
"""
try:
sc = Container()
sc.start(DATA)
sc.set_capabilities(["test.cap.memory.share"])
memory_limitation = 1024 * 1024 * 1024
sc.launch_command("python " +
sc.get_bind_dir() +
"/testhelper.py" +
" --test-dir " +
sc.get_bind_dir() +
" --do-allocate " +
str(memory_limitation))
# wait 5 seconds for previous operation to end
time.sleep(3)
helper = CGroupHelper(CURRENT_DIR)
allocation_return = helper.result()
assert allocation_return < memory_limitation
helper.remove_file()
finally:
sc.terminate()
def test_memory_cgroup_whitelisting(self):
""" Test if the whitelisting on cgroup memory is working as expected which behavior
is more permissive configuration should be applied on memory.limit_in_bytes
"""
try:
sc = Container()
cid = sc.start(DATA)
containerID = "SC-" + str(cid)
sc.set_capabilities(["test.cap.memory.whitelist"])
most_permissive_value = 1024 * 1024
time.sleep(0.5)
with open("/sys/fs/cgroup/memory/lxc/" + containerID + "/memory.limit_in_bytes", "r") as fh:
limit_in_bytes = int(fh.read())
assert limit_in_bytes == most_permissive_value
most_permissive_value = 10 * 1024 * 1024
with open("/sys/fs/cgroup/memory/lxc/" + containerID + "/memory.memsw.limit_in_bytes", "r") as fh:
memsw_limit = int(fh.read())
assert memsw_limit == most_permissive_value
finally:
sc.terminate()
def test_netcls_cgroup_set(self):
""" Test that the classid on net_cls cgroup gets set. Does not currently test if it is
picked up anywhere.
		TODO: In the future, run a network application inside the container, listen for the
		traffic on the outside and check that the classids are being set.
"""
try:
sc = Container()
cid = sc.start(DATA)
containerID = "SC-" + str(cid)
sc.set_capabilities(["test.cap.netcls"])
time.sleep(0.5)
with open("/sys/fs/cgroup/net_cls/lxc/" + containerID + "/net_cls.classid", "r") as fh:
value = int(fh.read())
assert value == int(TEST_NETCLS_VALUE, base=16)
finally:
sc.terminate()
def test_cpu_shares_cgroup_set(self):
""" Test that cpu.shares gets set. Does not test CPU usage.
"""
try:
sc = Container()
cid = sc.start(DATA)
containerID = "SC-" + str(cid)
sc.set_capabilities(["test.cap.cpu.shares.threshold"])
time.sleep(0.5)
with open("/sys/fs/cgroup/cpu/lxc/" + containerID + "/cpu.shares", "r") as fh:
value = int(fh.read())
assert value == int(CPU_SHARES_LOW_VALUE)
finally:
sc.terminate()
def test_cpu_shares_cgroup_whitelisting(self):
""" Test that whitelisting works when setting cpu.shares. Does not test CPU usage.
"""
try:
sc = Container()
cid = sc.start(DATA)
containerID = "SC-" + str(cid)
sc.set_capabilities(["test.cap.cpu.shares.whitelist"])
most_permissive_value = int(CPU_SHARES_WHITELISTING_VALUE)
time.sleep(0.5)
with open("/sys/fs/cgroup/cpu/lxc/" + containerID + "/cpu.shares", "r") as fh:
value = int(fh.read())
assert value == most_permissive_value
finally:
sc.terminate()
| lgpl-2.1 | 2,801,285,750,945,114,000 | 34.609489 | 110 | 0.591268 | false |
tungvx/deploy | Django-0.90/tests/testapp/models/m2o_recursive.py | 1 | 1066 | """
11. Relating an object to itself, many-to-one
To define a many-to-one relationship between a model and itself, use
``ForeignKey('self')``.
In this example, a ``Category`` is related to itself. That is, each
``Category`` has a parent ``Category``.
Set ``related_name`` to designate what the reverse relationship is called.
"""
from django.core import meta
class Category(meta.Model):
name = meta.CharField(maxlength=20)
parent = meta.ForeignKey('self', null=True, related_name='child')
class META:
module_name = 'categories'
def __repr__(self):
return self.name
API_TESTS = """
# Create a few Category objects.
>>> r = categories.Category(id=None, name='Root category', parent=None)
>>> r.save()
>>> c = categories.Category(id=None, name='Child category', parent=r)
>>> c.save()
>>> r.get_child_list()
[Child category]
>>> r.get_child(name__startswith='Child')
Child category
>>> r.get_parent()
Traceback (most recent call last):
...
CategoryDoesNotExist
>>> c.get_child_list()
[]
>>> c.get_parent()
Root category
"""
| apache-2.0 | 3,638,889,243,003,648,500 | 23.227273 | 74 | 0.67167 | false |
cadappl/krep | krep_subcmds/help_subcmd.py | 1 | 2299 |
from topics import key_compare, SubCommand
class HelpSubcmd(SubCommand):
COMMAND = 'help'
help_summary = 'Print the command summaries'
help_usage = '''\
%prog <subcmd> ...
Display the detailed usage of the sub-command or the list of all supported
sub-commands.
Environment variables KREP_EXTRA_PATH and KREP_SUBCMD_PATH can define new
external sub-commands. Define these variables if such commands are required.
The argument "all" indicates to list all sub-commands implicitly.'''
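	# Illustrative example (added for clarity; not part of the original
	# command): with external sub-commands placed in a hypothetical directory,
	# they can be picked up and listed via, e.g.:
	#   KREP_SUBCMD_PATH=/path/to/extra/subcmds krep help all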
def _print_all_commands(self):
print('Usage: krep subcmd [args] ...')
print('The commands of krep are:')
print('')
lines = list()
for name, cmd in self.commands.items(): # pylint: disable=E1101
try:
summary = cmd.help_summary.strip()
except AttributeError:
summary = 'No Summary'
if name in getattr(cmd, 'ALIASES', list()):
summary = 'Alias of "%s"' % getattr(cmd, 'COMMAND', cmd.NAME)
lines.append(' %-15s%s' % (name, summary))
def sort_help(linea, lineb):
def _is_help_command(line):
return line.lstrip().startswith('help')
if _is_help_command(linea):
return -1
elif _is_help_command(lineb):
return 1
return (linea > lineb) - (linea < lineb) # cmp(linea, lineb)
# put help command on the top
lines.sort(key=key_compare(sort_help))
print('\n'.join(lines))
print('\nSee more info with "krep help <command>"')
def _print_command(self, command):
if command not in self.commands: # pylint: disable=E1101
print('krep: "%s" is not a known command' % command)
else:
try:
cmd = self.commands[command] # pylint: disable=E1101
help_usage = cmd.help_usage
except AttributeError:
help_usage = 'Failed to read the command help.'
print(help_usage.replace('%prog', 'krep %s' % command))
def execute(self, options, *args): # pylint: disable=W0613
if len(args) == 0 or 'all' in args:
self._print_all_commands()
else:
for arg in args:
self._print_command(arg)
| lgpl-3.0 | -4,619,710,783,916,100,000 | 32.318841 | 77 | 0.570248 | false |
kevinkahn/softconsole | screens/screen.py | 1 | 16940 | import collections
import pygame
import functools
from guicore.switcher import SwitchScreen
import config
import hubs.hubs
import logsupport
import screens.__screens as screens
import stores.paramstore as paramstore
import stores.valuestore as valuestore
from keyspecs import toucharea
from logsupport import ConsoleError, ConsoleWarning, ConsoleDetail
from utils.utilfuncs import wc, tint, fmt
from utils import timers, utilities, fonts, displayupdate, hw
ScreenParams = {'DimTO': 99,
'CharColor': "white",
'PersistTO': 20,
'BackgroundColor': 'maroon',
'CmdKeyCol': "red",
'CmdCharCol': "white",
'DefaultHub': '',
'KeyColor': "aqua",
'KeyColorOn': "",
'KeyColorOff': "",
'KeyCharColorOn': "white",
'KeyCharColorOff': "black",
'KeyOnOutlineColor': "white",
'KeyOffOutlineColor': "black",
'KeyOutlineOffset': 3,
'KeyLabelOn': ['', ],
'KeyLabelOff': ['', ],
'ScreenTitleColor': "white",
'ScreenTitleSize': 50,
'ScreenTitle': '',
'ScreenTitleFields': ['', ],
'HorizBorder': 20,
'TopBorder': 20,
'BotBorder': 60,
'BotBorderWONav': 20,
'HorizButtonGap': 0,
'VertButGap': 0,
'NavKeyHeight': 60,
'HorizButGap': 0,
'NavKeyWidth': 60
}
screenStore = valuestore.NewValueStore(paramstore.ParamStore('ScreenParams'))
BACKTOKEN = None
HOMETOKEN = None
SELFTOKEN = None
def CommonClockTick(params):
# noinspection PyProtectedMember
config.AS._ClockTick(params)
# noinspection PyProtectedMember
def CommonClockTickValid():
return config.AS._ClockTickValid()
StdScreenClock = 1
ScreenClocks = {StdScreenClock: timers.RepeatingPost(StdScreenClock, paused=True, start=True, name='StdScreenClock',
proc=CommonClockTick, eventvalid=CommonClockTickValid)}
RunningScreenClock = ScreenClocks[StdScreenClock]
def InitScreenParams(parseconfig):
screens.screenStore = screenStore
for p, v in ScreenParams.items():
screenStore.SetVal(p, type(v)(parseconfig.get(p, v)))
def GoToScreen(NS, newstate='NonHome'):
SwitchScreen(NS, 'Bright', 'Go to Screen', newstate=newstate)
def PushToScreen(NS, newstate='NonHome', msg='Push to Screen'):
SwitchScreen(NS, 'Bright', msg, newstate=newstate, push=True)
def PopScreen(msg='PopScreen', newstate='Maint'):
SwitchScreen(BACKTOKEN, 'Bright', msg, newstate=newstate)
def IncorporateParams(this, clsnm, theseparams, screensection):
if screensection is None: screensection = {}
for p in theseparams:
if isinstance(theseparams, dict):
if theseparams[p] is not None: this.userstore.SetVal(p, theseparams[p]) # a value was set in config file
else:
if p in screensection:
# this.userstore.SetVal(p, type(ScreenParams[p])(screensection.get(p, ""))) # string only safe default
this.userstore.SetVal(p, type(ScreenParams[p])(screensection.get(p, ScreenParams[p])))
def AddUndefaultedParams(this, screensection, **kwargs):
if screensection is None: screensection = {}
for n, v in kwargs.items():
if n in this.__dict__: del this.__dict__[n] # remove if it was declared statically
this.userstore.SetVal(n, type(v)(screensection.get(n, v)))
def FlatenScreenLabel(label):
scrlabel = label[0]
for s in label[1:]:
scrlabel = scrlabel + " " + s
return scrlabel
def ButLayout(butcount):
# 1 2 3 4 5 6 7 8 9 10
plan = ((1, 1), (1, 2), (1, 3), (2, 2), (1, 5), (2, 3), (2, 4), (2, 4), (3, 3), (4, 3),
# 11 12 13 14 15 16 17 18 19 20
(4, 3), (4, 3), (4, 4), (4, 4), (4, 4), (4, 4), (5, 4), (5, 4), (5, 4), (5, 4))
if butcount in range(1, 21):
return plan[butcount - 1]
else:
logsupport.Logs.Log("Button layout error - too many or no buttons: {}".format(butcount), severity=ConsoleError)
return 5, 5
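# Illustrative example (added for clarity): ButLayout(6) returns (2, 3), i.e.
# the keys are laid out 2 per row in 3 rows; any count outside 1..20 logs an
# error and falls back to a 5 x 5 grid.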
class ScreenDesc(object):
"""
Basic information about a screen, subclassed by all other screens to handle this information
"""
def __setattr__(self, key, value):
if key not in ScreenParams:
object.__setattr__(self, key, value)
else:
self.userstore.SetVal(key, value)
# object.__setattr__(self, key, value)
def __getattr__(self, key):
return self.userstore.GetVal(key)
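	# Note (added for clarity): attributes named in ScreenParams are kept in the
	# screen's ParamStore (self.userstore) rather than on the instance, so they
	# inherit defaults from the global screen store or parent screen and can be
	# overridden per screen.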
def __init__(self, screensection, screenname, parentscreen=None, SingleUse=False, Type='unset'):
self.userstore = paramstore.ParamStore('Screen-' + screenname,
dp=screenStore if parentscreen is None else parentscreen.userstore,
locname=screenname)
		# todo add routine to update allowable mods per screen - but rationalize with incorporating parameters from higher levels
self.ScreenType = Type
self.markradius = int(min(hw.screenwidth, hw.screenheight) * .025)
self.name = screenname
self.singleuse = SingleUse
self.used = False
self.WatchMotion = False
self.Active = False # true if actually on screen
		self.ChildScreens = {}  # used to do cascaded deletes if this screen is deleted. Only list one-off dependents
self.DefaultNavKeysShowing = True
self.NavKeysShowing = True
self.NavKeys = collections.OrderedDict()
self.Keys = collections.OrderedDict()
self.WithNav = True
self.useablevertspace = hw.screenheight - self.TopBorder - self.BotBorder
self.useablevertspacesansnav = hw.screenheight - self.TopBorder - self.BotBorderWONav
self.useablehorizspace = hw.screenwidth - 2 * self.HorizBorder
self.startvertspace = self.TopBorder
self.starthorizspace = self.HorizBorder
self.HubInterestList = {} # one entry per hub, each entry is a dict mapping addr to Node
self.ScreenTitleBlk = None
self.ScreenTitle = ''
self.prevkey = None
self.nextkey = None
self.NavKeyWidth = (hw.screenwidth - 2 * self.HorizBorder) // 2
cvertcenter = hw.screenheight - self.BotBorder / 2
self.homekey = toucharea.ManualKeyDesc(self, 'Back<' + 'Home', ('Home',),
self.CmdKeyCol, self.CmdCharCol, self.CmdCharCol,
proc=functools.partial(GoToScreen, HOMETOKEN),
center=(
self.starthorizspace + .5 * self.NavKeyWidth,
cvertcenter),
size=(self.NavKeyWidth, self.NavKeyHeight), gaps=True)
self.backkey = toucharea.ManualKeyDesc(self, 'Nav>' + 'Back', ('Back',),
self.CmdKeyCol, self.CmdCharCol, self.CmdCharCol,
proc=functools.partial(GoToScreen, BACKTOKEN),
center=(
self.starthorizspace + 1.5 * self.NavKeyWidth,
cvertcenter),
size=(self.NavKeyWidth, self.NavKeyHeight), gaps=True)
IncorporateParams(self, 'Screen',
{'CharColor', 'DimTO', 'PersistTO', 'BackgroundColor', 'CmdKeyCol', 'CmdCharCol',
'DefaultHub', 'ScreenTitle', 'ScreenTitleColor', 'ScreenTitleFields', 'ScreenTitleSize', 'KeyCharColorOn',
'KeyCharColorOff', 'KeyColor'}, screensection)
AddUndefaultedParams(self, screensection, label=[screenname])
try:
self.DefaultHubObj = hubs.hubs.Hubs[self.DefaultHub]
except KeyError:
self.DefaultHubObj = None # todo test what happens later or force this to be an exiting error
logsupport.Logs.Log("Bad default hub name for screen: ", screenname, severity=ConsoleError)
raise ValueError
self.DecodedScreenTitleFields = []
for f in self.ScreenTitleFields:
if ':' in f:
self.DecodedScreenTitleFields.append(f.split(':'))
# todo compute the vertical space issue for the title if non-null; generate and horiz space the title later
if self.ScreenTitle != '':
# adjust space for a title
tempblk, _ = self._GenerateTitleBlk(self.ScreenTitle, self.DecodedScreenTitleFields, self.ScreenTitleColor)
h = tempblk.get_height()
titlegap = h // 10 # todo is this the best way to space?
self.startvertspace = self.startvertspace + h + titlegap
self.useablevertspace = self.useablevertspace - h - titlegap
self.useablevertspacesansnav = self.useablevertspacesansnav - h - titlegap
self.ScreenTitleBlk = tempblk
self.ScreenClock = ScreenClocks[StdScreenClock]
self.ScreenTimers = [] # (Timer, Cancel Proc or None) Don't put ScreenCLock in the list or it gets canceled
utilities.register_example('ScreenDesc', self)
def _GenerateTitleBlk(self, title, fields, color):
vals = ['--' if v is None else v for v in [valuestore.GetVal(f) for f in fields]]
formattedTitle = fmt.format(title, *vals)
blk = fonts.fonts.Font(self.ScreenTitleSize, bold=True).render(formattedTitle, 0, wc(color))
w = blk.get_width()
return blk, w
def _ClockTickValid(self):
if not self.Active: print('Clock not valid {}'.format(self.name))
return self.Active
# noinspection PyUnusedLocal
def _ClockTick(self, params):
if not self.Active: return # avoid race with timer and screen exit
self.ClockTick()
	def ClockTick(self):  # this is meant to be overridden if a screen wants more complex operation
self.ReInitDisplay()
def SetScreenClock(self, interval):
# for screens with a non standard clocking rate
if interval in ScreenClocks:
self.ScreenClock = ScreenClocks[interval]
else:
self.ScreenClock = timers.RepeatingPost(interval, paused=True, start=True,
name='ScreenClock-' + str(interval),
proc=CommonClockTick, eventvalid=CommonClockTickValid)
ScreenClocks[interval] = self.ScreenClock
def CreateNavKeys(self, prevk, nextk):
cvertcenter = hw.screenheight - self.BotBorder / 2
self.prevkey = toucharea.ManualKeyDesc(self, 'Nav<' + prevk.name,
prevk.label,
prevk.CmdKeyCol, prevk.CmdCharCol,
prevk.CmdCharCol,
proc=functools.partial(GoToScreen, prevk),
center=(
self.starthorizspace + .5 * (
self.NavKeyWidth),
cvertcenter),
size=(self.NavKeyWidth, self.NavKeyHeight), gaps=True)
self.nextkey = toucharea.ManualKeyDesc(self, 'Nav>' + nextk.name,
nextk.label,
nextk.CmdKeyCol, nextk.CmdCharCol,
nextk.CmdCharCol,
proc=functools.partial(GoToScreen, nextk),
center=(
self.starthorizspace + 1.5 * (
self.NavKeyWidth),
cvertcenter),
size=(self.NavKeyWidth, self.NavKeyHeight), gaps=True)
def ClearScreenTitle(self):
if self.ScreenTitleBlk is None: return
h = self.ScreenTitleBlk.get_height()
self.ScreenTitleBlk = None
self.ScreenTitle = ''
titlegap = h // 10
self.startvertspace = self.startvertspace - h - titlegap
self.useablevertspace = self.useablevertspace + h + titlegap
self.useablevertspacesansnav = self.useablevertspacesansnav + h + titlegap
def SetScreenTitle(self, name, fontsz, color, force=False):
if self.ScreenTitleBlk is not None and not force:
return # User explicitly set a title so don't override it
self.ClearScreenTitle()
self.ScreenTitle = name
self.ScreenTitleBlk = fonts.fonts.Font(fontsz).render(name, 0, wc(color))
h = self.ScreenTitleBlk.get_height()
titlegap = h // 10 # todo is this the best way to space? if fix - fix clear also
self.startvertspace = self.startvertspace + h + titlegap
self.useablevertspace = self.useablevertspace - h - titlegap
self.useablevertspacesansnav = self.useablevertspacesansnav - h - titlegap
def ButSize(self, bpr, bpc, height):
h = self.useablevertspace if height == 0 else height
return (
self.useablehorizspace / bpr, h / bpc)
def PaintNavKeys(self):
for key in self.NavKeys.values():
key.PaintKey()
def PaintKeys(self):
if self.Keys is not None:
for key in self.Keys.values():
if type(key) is not toucharea.TouchPoint:
key.PaintKey()
self.PaintNavKeys()
def ScreenContentRepaint(self):
pass
def AddToHubInterestList(self, hub, item, value):
if hub.name in self.HubInterestList:
self.HubInterestList[hub.name][item] = value
else:
self.HubInterestList[hub.name] = {item: value}
def _PrepScreen(self, nav=None, init=True):
global RunningScreenClock
if self.used:
logsupport.Logs.Log('Attempted reuse (Init: {}) of single use screen {}'.format(init, self.name),
severity=ConsoleError)
if init:
if self.ScreenClock != RunningScreenClock:
RunningScreenClock.pause()
self.ScreenClock.resume()
RunningScreenClock = self.ScreenClock
else:
RunningScreenClock.resume()
if init: self.NavKeys = nav
self.PaintBase()
self.PaintKeys()
if self.ScreenTitleBlk is not None:
self.ScreenTitleBlk, w = self._GenerateTitleBlk(self.ScreenTitle, self.DecodedScreenTitleFields,
self.ScreenTitleColor)
hw.screen.blit(self.ScreenTitleBlk,
(self.starthorizspace + (self.useablehorizspace - w) // 2, self.TopBorder))
self.ScreenContentRepaint()
displayupdate.updatedisplay()
def InitDisplay(self, nav):
self._PrepScreen(nav, True)
def ReInitDisplay(self):
self._PrepScreen(None, False)
def NodeEvent(self, evnt):
if evnt.node is not None:
if evnt.hub != '*VARSTORE*': # var changes can be reported while any screen is up
logsupport.Logs.Log("Unexpected event to screen: ", self.name, ' Hub: ', str(evnt.hub), ' Node: ',
str(evnt.node),
' Val: ', str(evnt.value), severity=ConsoleDetail)
else:
pass
else:
logsupport.Logs.Log(
'Node event to screen {} with no handler node: {} (Event: {})'.format(self.name, evnt.node, evnt),
severity=ConsoleWarning, hb=True, tb=True)
def VarEvent(self, evnt):
pass # var changes can happen with any screen up so if screen doesn't care about vars it doesn't define a handler
# logsupport.Logs.Log('Var event to screen {} with no handler (Event: {})'.format(self.name,evnt), severity=ConsoleError)
def ExitScreen(self, viaPush):
if self.ScreenClock is not None: self.ScreenClock.pause()
for timer in self.ScreenTimers:
if timer[0].is_alive():
timer[0].cancel()
if timer[1] is not None: timer[1]()
self.ScreenTimers = []
if self.singleuse:
if viaPush:
pass
else:
self.userstore.DropStore()
for nm, k in self.Keys.items():
if hasattr(k, 'userstore'): k.userstore.DropStore()
for nm, k in self.NavKeys.items():
if hasattr(k, 'userstore'): k.userstore.DropStore()
self.used = True
def DeleteScreen(self):
# explicit screen destroy
self.userstore.DropStore()
for timer in self.ScreenTimers:
if timer[0].is_alive():
timer[0].cancel()
if timer[1] is not None: timer[1]()
for n, s in self.ChildScreens.items():
s.DeleteScreen()
def PopOver(self):
try:
if self.singleuse:
self.userstore.DropStore()
for nm, k in self.Keys.items():
k.userstore.DropStore()
for nm, k in self.NavKeys.items():
k.userstore.DropStore()
self.used = True
except Exception as E:
logsupport.Logs.Log('Screen sequencing exception for screen {}: {}'.format(self.name, repr(E)),
severity=ConsoleWarning)
def PaintBase(self):
hw.screen.fill(wc(self.BackgroundColor))
lineclr = tint(self.BackgroundColor, tint_factor=.5)
if config.sysStore.NetErrorIndicator:
pygame.draw.circle(hw.screen, tint(self.BackgroundColor, tint_factor=.5),
(self.markradius, self.markradius), self.markradius, 0)
lineclr = wc(self.BackgroundColor)
if config.sysStore.ErrorNotice != -1:
pygame.draw.line(hw.screen, lineclr, (0, self.markradius), (2 * self.markradius, self.markradius), 3)
pygame.draw.line(hw.screen, lineclr, (self.markradius, 0), (self.markradius, 2 * self.markradius), 3)
class BaseKeyScreenDesc(ScreenDesc):
def __init__(self, screensection, screenname, parentscreen=None, SingleUse=False):
super().__init__(screensection, screenname, parentscreen=parentscreen, SingleUse=SingleUse)
AddUndefaultedParams(self, screensection, KeysPerColumn=0, KeysPerRow=0)
self.buttonsperrow = -1
self.buttonspercol = -1
utilities.register_example('BaseKeyScreenDesc', self)
def LayoutKeys(self, extraOffset=0, height=0):
# Compute the positions and sizes for the Keys and store in the Key objects
explicitlayout = self.KeysPerColumn * self.KeysPerRow
if explicitlayout != 0:
# user provided explicit button layout
if explicitlayout >= len(self.Keys):
# user layout provides enough space
bpr, bpc = (self.KeysPerRow, self.KeysPerColumn)
else:
# bad user layout - go with automatic
logsupport.Logs.Log('Bad explicit key layout for: ', self.name, severity=ConsoleWarning)
bpr, bpc = ButLayout(len(self.Keys))
else:
bpr, bpc = ButLayout(
len(self.Keys)) # don't do this if explicit layout spec's because may be more keys than it can handle
self.buttonsperrow = bpr
self.buttonspercol = bpc
buttonsize = self.ButSize(bpr, bpc, height)
hpos = []
vpos = []
for i in range(bpr):
hpos.append(self.starthorizspace + (.5 + i) * buttonsize[0])
for i in range(bpc):
vpos.append(self.startvertspace + extraOffset + (.5 + i) * buttonsize[1])
for i, (kn, key) in enumerate(self.Keys.items()):
key.FinishKey((hpos[i % bpr], vpos[i // bpr]), buttonsize)
| apache-2.0 | -8,380,265,676,621,377,000 | 35.119403 | 122 | 0.689906 | false |
wdv4758h/ZipPy | edu.uci.python.benchmark/src/benchmarks/sympy/sympy/integrals/manualintegrate.py | 2 | 39796 | """Integration method that emulates by-hand techniques.
This module also provides functionality to get the steps used to evaluate a
particular integral, in the ``integral_steps`` function. This will return
nested namedtuples representing the integration rules used. The
``manualintegrate`` function computes the integral using those steps given
an integrand; given the steps, ``_manualintegrate`` will evaluate them.
The integrator can be extended with new heuristics and evaluation
techniques. To do so, write a function that accepts an ``IntegralInfo``
object and returns either a namedtuple representing a rule or
``None``. Then, write another function that accepts the namedtuple's fields
and returns the antiderivative, and decorate it with
``@evaluates(namedtuple_type)``. If the new technique requires a new
match, add the key and call to the antiderivative function to integral_steps.
To enable simple substitutions, add the match to find_substitutions.
"""
from __future__ import print_function, division
from collections import namedtuple
import sympy
from sympy.core.compatibility import reduce
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.strategies.core import (switch, identity, do_one, null_safe,
condition, tryit)
def Rule(name, props=""):
# GOTCHA: namedtuple class name not considered!
def __eq__(self, other):
return self.__class__ == other.__class__ and tuple.__eq__(self, other)
__neq__ = lambda self, other: not __eq__(self, other)
cls = namedtuple(name, props + " context symbol")
cls.__eq__ = __eq__
cls.__ne__ = __neq__
return cls
ConstantRule = Rule("ConstantRule", "constant")
ConstantTimesRule = Rule("ConstantTimesRule", "constant other substep")
PowerRule = Rule("PowerRule", "base exp")
AddRule = Rule("AddRule", "substeps")
URule = Rule("URule", "u_var u_func constant substep")
PartsRule = Rule("PartsRule", "u dv v_step second_step")
CyclicPartsRule = Rule("CyclicPartsRule", "parts_rules coefficient")
TrigRule = Rule("TrigRule", "func arg")
ExpRule = Rule("ExpRule", "base exp")
ReciprocalRule = Rule("ReciprocalRule", "func")
ArctanRule = Rule("ArctanRule")
ArcsinRule = Rule("ArcsinRule")
InverseHyperbolicRule = Rule("InverseHyperbolicRule", "func")
AlternativeRule = Rule("AlternativeRule", "alternatives")
DontKnowRule = Rule("DontKnowRule")
DerivativeRule = Rule("DerivativeRule")
RewriteRule = Rule("RewriteRule", "rewritten substep")
PiecewiseRule = Rule("PiecewiseRule", "subfunctions")
HeavisideRule = Rule("HeavisideRule", "func")
TrigSubstitutionRule = Rule("TrigSubstitutionRule", "theta func rewritten substep")
IntegralInfo = namedtuple('IntegralInfo', 'integrand symbol')
evaluators = {}
def evaluates(rule):
def _evaluates(func):
func.rule = rule
evaluators[rule] = func
return func
return _evaluates
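# Illustrative sketch of the extension recipe described in the module
# docstring (added for clarity; not part of the original module). SinhRule,
# sinh_rule and eval_sinh are hypothetical names, and this rule is not wired
# into integral_steps or find_substitutions here.
SinhRule = Rule("SinhRule", "arg")
def sinh_rule(integral):
	# Matcher: take an IntegralInfo and return a rule namedtuple, or None.
	integrand, symbol = integral
	if isinstance(integrand, sympy.sinh) and integrand.args[0] == symbol:
		return SinhRule(symbol, integrand, symbol)
@evaluates(SinhRule)
def eval_sinh(arg, integrand, symbol):
	# Evaluator: receives the rule's fields and returns the antiderivative.
	return sympy.cosh(arg)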
def contains_dont_know(rule):
if isinstance(rule, DontKnowRule):
return True
else:
for val in rule:
if isinstance(val, tuple):
if contains_dont_know(val):
return True
elif isinstance(val, list):
if any(contains_dont_know(i) for i in val):
return True
return False
def manual_diff(f, symbol):
"""Derivative of f in form expected by find_substitutions
SymPy's derivatives for some trig functions (like cot) aren't in a form
that works well with finding substitutions; this replaces the
derivatives for those particular forms with something that works better.
"""
if f.args:
arg = f.args[0]
if isinstance(f, sympy.tan):
return arg.diff(symbol) * sympy.sec(arg)**2
elif isinstance(f, sympy.cot):
return -arg.diff(symbol) * sympy.csc(arg)**2
elif isinstance(f, sympy.sec):
return arg.diff(symbol) * sympy.sec(arg) * sympy.tan(arg)
elif isinstance(f, sympy.csc):
return -arg.diff(symbol) * sympy.csc(arg) * sympy.cot(arg)
elif isinstance(f, sympy.Add):
return sum([manual_diff(arg, symbol) for arg in f.args])
elif isinstance(f, sympy.Mul):
if len(f.args) == 2 and isinstance(f.args[0], sympy.Number):
return f.args[0] * manual_diff(f.args[1], symbol)
return f.diff(symbol)
# Method based on that of SIN, described in "Symbolic Integration: The
# Stormy Decade"
def find_substitutions(integrand, symbol, u_var):
results = []
def test_subterm(u, u_diff):
substituted = integrand / u_diff
if symbol not in substituted.free_symbols:
# replaced everything already
return False
substituted = substituted.subs(u, u_var).cancel()
if symbol not in substituted.free_symbols:
return substituted.as_independent(u_var, as_Add=False)
return False
def possible_subterms(term):
if isinstance(term, (TrigonometricFunction,
sympy.asin, sympy.acos, sympy.atan,
sympy.exp, sympy.log, sympy.Heaviside)):
return [term.args[0]]
elif isinstance(term, sympy.Mul):
r = []
for u in term.args:
r.append(u)
r.extend(possible_subterms(u))
return r
elif isinstance(term, sympy.Pow):
if term.args[1].is_constant(symbol):
return [term.args[0]]
elif term.args[0].is_constant(symbol):
return [term.args[1]]
elif isinstance(term, sympy.Add):
r = []
for arg in term.args:
r.append(arg)
r.extend(possible_subterms(arg))
return r
return []
for u in possible_subterms(integrand):
if u == symbol:
continue
u_diff = manual_diff(u, symbol)
new_integrand = test_subterm(u, u_diff)
if new_integrand is not False:
constant, new_integrand = new_integrand
substitution = (u, constant, new_integrand)
if substitution not in results:
results.append(substitution)
return results
def rewriter(condition, rewrite):
"""Strategy that rewrites an integrand."""
def _rewriter(integral):
integrand, symbol = integral
if condition(*integral):
rewritten = rewrite(*integral)
if rewritten != integrand:
substep = integral_steps(rewritten, symbol)
if not isinstance(substep, DontKnowRule):
return RewriteRule(
rewritten,
substep,
integrand, symbol)
return _rewriter
def proxy_rewriter(condition, rewrite):
"""Strategy that rewrites an integrand based on some other criteria."""
def _proxy_rewriter(criteria):
criteria, integral = criteria
integrand, symbol = integral
args = criteria + list(integral)
if condition(*args):
rewritten = rewrite(*args)
if rewritten != integrand:
return RewriteRule(
rewritten,
integral_steps(rewritten, symbol),
integrand, symbol)
return _proxy_rewriter
def multiplexer(conditions):
"""Apply the rule that matches the condition, else None"""
def multiplexer_rl(expr):
for key, rule in conditions.items():
if key(expr):
return rule(expr)
return multiplexer_rl
def alternatives(*rules):
"""Strategy that makes an AlternativeRule out of multiple possible results."""
def _alternatives(integral):
alts = []
for rule in rules:
result = rule(integral)
if (result and not isinstance(result, DontKnowRule) and
result != integral and result not in alts):
alts.append(result)
if len(alts) == 1:
return alts[0]
elif alts:
doable = [rule for rule in alts if not contains_dont_know(rule)]
if doable:
return AlternativeRule(doable, *integral)
else:
return AlternativeRule(alts, *integral)
return _alternatives
def constant_rule(integral):
integrand, symbol = integral
return ConstantRule(integral.integrand, *integral)
def power_rule(integral):
integrand, symbol = integral
base, exp = integrand.as_base_exp()
if symbol not in exp.free_symbols and isinstance(base, sympy.Symbol):
if sympy.simplify(exp + 1) == 0:
return ReciprocalRule(base, integrand, symbol)
return PowerRule(base, exp, integrand, symbol)
elif symbol not in base.free_symbols and isinstance(exp, sympy.Symbol):
rule = ExpRule(base, exp, integrand, symbol)
if sympy.ask(~sympy.Q.zero(sympy.log(base))):
return rule
elif sympy.ask(sympy.Q.zero(sympy.log(base))):
return ConstantRule(1, 1, symbol)
return PiecewiseRule([
(ConstantRule(1, 1, symbol), sympy.Eq(sympy.log(base), 0)),
(rule, True)
], integrand, symbol)
def exp_rule(integral):
integrand, symbol = integral
if isinstance(integrand.args[0], sympy.Symbol):
return ExpRule(sympy.E, integrand.args[0], integrand, symbol)
def inverse_trig_rule(integral):
integrand, symbol = integral
base, exp = integrand.as_base_exp()
a = sympy.Wild('a', exclude=[symbol])
b = sympy.Wild('b', exclude=[symbol])
match = base.match(a + b*symbol**2)
if not match:
return
def negative(x):
return sympy.ask(sympy.Q.negative(x)) or x.is_negative or x.could_extract_minus_sign()
def ArcsinhRule(integrand, symbol):
return InverseHyperbolicRule(sympy.asinh, integrand, symbol)
def ArccoshRule(integrand, symbol):
return InverseHyperbolicRule(sympy.acosh, integrand, symbol)
def make_inverse_trig(RuleClass, base_exp, a, sign_a, b, sign_b):
u_var = sympy.Dummy("u")
current_base = base
current_symbol = symbol
constant = u_func = u_constant = substep = None
factored = integrand
if a != 1:
constant = a**base_exp
current_base = sign_a + sign_b * (b/a) * current_symbol**2
factored = current_base ** base_exp
if (b/a) != 1:
u_func = sympy.sqrt(b/a) * symbol
u_constant = sympy.sqrt(a/b)
current_symbol = u_var
current_base = sign_a + sign_b * current_symbol**2
substep = RuleClass(current_base ** base_exp, current_symbol)
if u_func is not None:
if u_constant != 1:
substep = ConstantTimesRule(
u_constant, current_base ** base_exp, substep,
u_constant * current_base ** base_exp, symbol)
substep = URule(u_var, u_func, u_constant, substep, factored, symbol)
if constant is not None:
substep = ConstantTimesRule(constant, factored, substep, integrand, symbol)
return substep
a, b = match[a], match[b]
# list of (rule, base_exp, a, sign_a, b, sign_b, condition)
possibilities = []
if sympy.simplify(exp + 1) == 0 and not (negative(a) or negative(b)):
possibilities.append((ArctanRule, exp, a, 1, b, 1, sympy.And(a > 0, b > 0)))
elif sympy.simplify(2*exp + 1) == 0:
possibilities.append((ArcsinRule, exp, a, 1, -b, -1, sympy.And(a > 0, b < 0)))
possibilities.append((ArcsinhRule, exp, a, 1, b, 1, sympy.And(a > 0, b > 0)))
possibilities.append((ArccoshRule, exp, -a, -1, b, 1, sympy.And(a < 0, b > 0)))
possibilities = [p for p in possibilities if p[-1] is not sympy.false]
if a.is_number and b.is_number:
possibility = [p for p in possibilities if p[-1] is sympy.true]
if len(possibility) == 1:
return make_inverse_trig(*possibility[0][:-1])
elif possibilities:
return PiecewiseRule(
[(make_inverse_trig(*p[:-1]), p[-1]) for p in possibilities],
integrand, symbol)
def add_rule(integral):
integrand, symbol = integral
return AddRule(
[integral_steps(g, symbol)
for g in integrand.as_ordered_terms()],
integrand, symbol)
def mul_rule(integral):
integrand, symbol = integral
args = integrand.args
# Constant times function case
coeff, f = integrand.as_independent(symbol)
if coeff != 1:
return ConstantTimesRule(
coeff, f,
integral_steps(f, symbol),
integrand, symbol)
def _parts_rule(integrand, symbol):
# LIATE rule:
# log, inverse trig, algebraic (polynomial), trigonometric, exponential
def pull_out_polys(integrand):
integrand = integrand.together()
polys = [arg for arg in integrand.args if arg.is_polynomial(symbol)]
if polys:
u = sympy.Mul(*polys)
dv = integrand / u
return u, dv
def pull_out_u(*functions):
def pull_out_u_rl(integrand):
if any([integrand.has(f) for f in functions]):
args = [arg for arg in integrand.args
if any(isinstance(arg, cls) for cls in functions)]
if args:
u = reduce(lambda a,b: a*b, args)
dv = integrand / u
return u, dv
return pull_out_u_rl
liate_rules = [pull_out_u(sympy.log), pull_out_u(sympy.atan, sympy.asin, sympy.acos),
pull_out_polys, pull_out_u(sympy.sin, sympy.cos),
pull_out_u(sympy.exp)]
dummy = sympy.Dummy("temporary")
# we can integrate log(x) and atan(x) by setting dv = 1
if isinstance(integrand, (sympy.log, sympy.atan, sympy.asin, sympy.acos)):
integrand = dummy * integrand
for index, rule in enumerate(liate_rules):
result = rule(integrand)
if result:
u, dv = result
# Don't pick u to be a constant if possible
if symbol not in u.free_symbols and not u.has(dummy):
return
u = u.subs(dummy, 1)
dv = dv.subs(dummy, 1)
for rule in liate_rules[index + 1:]:
r = rule(integrand)
# make sure dv is amenable to integration
if r and r[0].subs(dummy, 1) == dv:
du = u.diff(symbol)
v_step = integral_steps(dv, symbol)
v = _manualintegrate(v_step)
return u, dv, v, du, v_step
def parts_rule(integral):
integrand, symbol = integral
constant, integrand = integrand.as_coeff_Mul()
result = _parts_rule(integrand, symbol)
steps = []
if result:
u, dv, v, du, v_step = result
steps.append(result)
if isinstance(v, sympy.Integral):
return
while True:
if symbol not in (integrand / (v * du)).cancel().free_symbols:
coefficient = ((v * du) / integrand).cancel()
rule = CyclicPartsRule(
[PartsRule(u, dv, v_step, None, None, None)
for (u, dv, v, du, v_step) in steps],
(-1) ** len(steps) * coefficient,
integrand, symbol
)
if constant != 1:
rule = ConstantTimesRule(constant, integrand, rule,
constant * integrand, symbol)
return rule
result = _parts_rule(v * du, symbol)
if result:
u, dv, v, du, v_step = result
steps.append(result)
else:
break
def make_second_step(steps, integrand):
if steps:
u, dv, v, du, v_step = steps[0]
return PartsRule(u, dv, v_step,
make_second_step(steps[1:], v * du),
integrand, symbol)
else:
return integral_steps(integrand, symbol)
if steps:
u, dv, v, du, v_step = steps[0]
rule = PartsRule(u, dv, v_step,
make_second_step(steps[1:], v * du),
integrand, symbol)
if constant != 1:
rule = ConstantTimesRule(constant, integrand, rule,
constant * integrand, symbol)
return rule
def trig_rule(integral):
integrand, symbol = integral
if isinstance(integrand, sympy.sin) or isinstance(integrand, sympy.cos):
arg = integrand.args[0]
if not isinstance(arg, sympy.Symbol):
return # perhaps a substitution can deal with it
if isinstance(integrand, sympy.sin):
func = 'sin'
else:
func = 'cos'
return TrigRule(func, arg, integrand, symbol)
if integrand == sympy.sec(symbol)**2:
return TrigRule('sec**2', symbol, integrand, symbol)
elif integrand == sympy.csc(symbol)**2:
return TrigRule('csc**2', symbol, integrand, symbol)
if isinstance(integrand, sympy.tan):
rewritten = sympy.sin(*integrand.args) / sympy.cos(*integrand.args)
elif isinstance(integrand, sympy.cot):
rewritten = sympy.cos(*integrand.args) / sympy.sin(*integrand.args)
elif isinstance(integrand, sympy.sec):
arg = integrand.args[0]
rewritten = ((sympy.sec(arg)**2 + sympy.tan(arg) * sympy.sec(arg)) /
(sympy.sec(arg) + sympy.tan(arg)))
elif isinstance(integrand, sympy.csc):
arg = integrand.args[0]
rewritten = ((sympy.csc(arg)**2 + sympy.cot(arg) * sympy.csc(arg)) /
(sympy.csc(arg) + sympy.cot(arg)))
else:
return
return RewriteRule(
rewritten,
integral_steps(rewritten, symbol),
integrand, symbol
)
def trig_product_rule(integral):
integrand, symbol = integral
sectan = sympy.sec(symbol) * sympy.tan(symbol)
q = integrand / sectan
if symbol not in q.free_symbols:
rule = TrigRule('sec*tan', symbol, sectan, symbol)
if q != 1:
rule = ConstantTimesRule(q, sectan, rule, integrand, symbol)
return rule
csccot = -sympy.csc(symbol) * sympy.cot(symbol)
q = integrand / csccot
if symbol not in q.free_symbols:
rule = TrigRule('csc*cot', symbol, csccot, symbol)
if q != 1:
rule = ConstantTimesRule(q, csccot, rule, integrand, symbol)
return rule
def heaviside_rule(integral):
integrand, symbol = integral
if isinstance(integrand.args[0], sympy.Symbol):
return HeavisideRule(integrand.args[0], integrand, symbol)
# else perhaps substitution can handle this
@sympy.cacheit
def make_wilds(symbol):
a = sympy.Wild('a', exclude=[symbol])
b = sympy.Wild('b', exclude=[symbol])
m = sympy.Wild('m', exclude=[symbol], properties=[lambda n: isinstance(n, sympy.Integer)])
n = sympy.Wild('n', exclude=[symbol], properties=[lambda n: isinstance(n, sympy.Integer)])
return a, b, m, n
@sympy.cacheit
def sincos_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = sympy.sin(a*symbol)**m * sympy.cos(b*symbol)**n
return pattern, a, b, m, n
@sympy.cacheit
def tansec_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = sympy.tan(a*symbol)**m * sympy.sec(b*symbol)**n
return pattern, a, b, m, n
@sympy.cacheit
def cotcsc_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = sympy.cot(a*symbol)**m * sympy.csc(b*symbol)**n
return pattern, a, b, m, n
def uncurry(func):
def uncurry_rl(args):
return func(*args)
return uncurry_rl
def trig_rewriter(rewrite):
def trig_rewriter_rl(args):
a, b, m, n, integrand, symbol = args
rewritten = rewrite(a, b, m, n, integrand, symbol)
if rewritten != integrand:
return RewriteRule(
rewritten,
integral_steps(rewritten, symbol),
integrand, symbol)
return trig_rewriter_rl
sincos_botheven_condition = uncurry(
lambda a, b, m, n, i, s: m.is_even and n.is_even and
m.is_nonnegative and n.is_nonnegative)
sincos_botheven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (((1 - sympy.cos(2*a*symbol)) / 2) ** (m / 2)) *
(((1 + sympy.cos(2*b*symbol)) / 2) ** (n / 2)) ))
sincos_sinodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd and m >= 3)
sincos_sinodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 - sympy.cos(a*symbol)**2)**((m - 1) / 2) *
sympy.sin(a*symbol) *
sympy.cos(b*symbol) ** n))
sincos_cosodd_condition = uncurry(lambda a, b, m, n, i, s: n.is_odd and n >= 3)
sincos_cosodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 - sympy.sin(b*symbol)**2)**((n - 1) / 2) *
sympy.cos(b*symbol) *
sympy.sin(a*symbol) ** m))
tansec_seceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
tansec_seceven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 + sympy.tan(b*symbol)**2) ** (n/2 - 1) *
sympy.sec(b*symbol)**2 *
sympy.tan(a*symbol) ** m ))
tansec_tanodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
tansec_tanodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (sympy.sec(a*symbol)**2 - 1) ** ((m - 1) / 2) *
sympy.tan(a*symbol) *
sympy.sec(b*symbol) ** n ))
tan_tansquared_condition = uncurry(lambda a, b, m, n, i, s: m == 2 and n == 0)
tan_tansquared = trig_rewriter(
lambda a, b, m, n, i, symbol: ( sympy.sec(a*symbol)**2 - 1))
cotcsc_csceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
cotcsc_csceven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 + sympy.cot(b*symbol)**2) ** (n/2 - 1) *
sympy.csc(b*symbol)**2 *
sympy.cot(a*symbol) ** m ))
cotcsc_cotodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
cotcsc_cotodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (sympy.csc(a*symbol)**2 - 1) ** ((m - 1) / 2) *
sympy.cot(a*symbol) *
sympy.csc(b*symbol) ** n ))
def trig_sincos_rule(integral):
integrand, symbol = integral
if any(integrand.has(f) for f in (sympy.sin, sympy.cos)):
pattern, a, b, m, n = sincos_pattern(symbol)
match = integrand.match(pattern)
if match:
a, b, m, n = match.get(a, 0), match.get(b, 0), match.get(m, 0), match.get(n, 0)
return multiplexer({
sincos_botheven_condition: sincos_botheven,
sincos_sinodd_condition: sincos_sinodd,
sincos_cosodd_condition: sincos_cosodd
})((a, b, m, n, integrand, symbol))
def trig_tansec_rule(integral):
integrand, symbol = integral
integrand = integrand.subs({
1 / sympy.cos(symbol): sympy.sec(symbol)
})
if any(integrand.has(f) for f in (sympy.tan, sympy.sec)):
pattern, a, b, m, n = tansec_pattern(symbol)
match = integrand.match(pattern)
if match:
a, b, m, n = match.get(a, 0),match.get(b, 0), match.get(m, 0), match.get(n, 0)
return multiplexer({
tansec_tanodd_condition: tansec_tanodd,
tansec_seceven_condition: tansec_seceven,
tan_tansquared_condition: tan_tansquared
})((a, b, m, n, integrand, symbol))
def trig_cotcsc_rule(integral):
integrand, symbol = integral
integrand = integrand.subs({
1 / sympy.sin(symbol): sympy.csc(symbol),
1 / sympy.tan(symbol): sympy.cot(symbol),
sympy.cos(symbol) / sympy.tan(symbol): sympy.cot(symbol)
})
if any(integrand.has(f) for f in (sympy.cot, sympy.csc)):
pattern, a, b, m, n = cotcsc_pattern(symbol)
match = integrand.match(pattern)
if match:
a, b, m, n = match.get(a, 0),match.get(b, 0), match.get(m, 0), match.get(n, 0)
return multiplexer({
cotcsc_cotodd_condition: cotcsc_cotodd,
cotcsc_csceven_condition: cotcsc_csceven
})((a, b, m, n, integrand, symbol))
def trig_powers_products_rule(integral):
return do_one(null_safe(trig_sincos_rule),
null_safe(trig_tansec_rule),
null_safe(trig_cotcsc_rule))(integral)
def trig_substitution_rule(integral):
integrand, symbol = integral
a = sympy.Wild('a', exclude=[0, symbol])
b = sympy.Wild('b', exclude=[0, symbol])
theta = sympy.Dummy("theta")
matches = integrand.find(a + b*symbol**2)
if matches:
for expr in matches:
match = expr.match(a + b*symbol**2)
a = match[a]
b = match[b]
a_positive = ((a.is_number and a > 0) or a.is_positive)
b_positive = ((b.is_number and b > 0) or b.is_positive)
x_func = None
if a_positive and b_positive:
# a**2 + b*x**2
x_func = (sympy.sqrt(a)/sympy.sqrt(b)) * sympy.tan(theta)
elif a_positive and not b_positive:
# a**2 - b*x**2
x_func = (sympy.sqrt(a)/sympy.sqrt(-b)) * sympy.sin(theta)
elif not a_positive and b_positive:
# b*x**2 - a**2
x_func = (sympy.sqrt(-a)/sympy.sqrt(b)) * sympy.sec(theta)
if x_func:
replaced = integrand.subs(symbol, x_func).trigsimp()
if not replaced.has(symbol):
replaced *= manual_diff(x_func, theta)
replaced = replaced.trigsimp()
secants = replaced.find(1/sympy.cos(theta))
if secants:
replaced = replaced.xreplace({
1/sympy.cos(theta): sympy.sec(theta)
})
substep = integral_steps(replaced, theta)
if not contains_dont_know(substep):
return TrigSubstitutionRule(
theta, x_func, replaced, substep, integrand, symbol)
def substitution_rule(integral):
integrand, symbol = integral
u_var = sympy.Dummy("u")
substitutions = find_substitutions(integrand, symbol, u_var)
if substitutions:
ways = []
for u_func, c, substituted in substitutions:
subrule = integral_steps(substituted, u_var)
if contains_dont_know(subrule):
continue
if sympy.simplify(c - 1) != 0:
_, denom = c.as_numer_denom()
subrule = ConstantTimesRule(c, substituted, subrule, substituted, u_var)
if denom.free_symbols:
piecewise = []
could_be_zero = []
if isinstance(denom, sympy.Mul):
could_be_zero = denom.args
else:
could_be_zero.append(denom)
for expr in could_be_zero:
if not sympy.ask(~sympy.Q.zero(expr)):
substep = integral_steps(integrand.subs(expr, 0), symbol)
if substep:
piecewise.append((
substep,
sympy.Eq(expr, 0)
))
piecewise.append((subrule, True))
subrule = PiecewiseRule(piecewise, substituted, symbol)
ways.append(URule(u_var, u_func, c,
subrule,
integrand, symbol))
if len(ways) > 1:
return AlternativeRule(ways, integrand, symbol)
elif ways:
return ways[0]
elif integrand.has(sympy.exp):
u_func = sympy.exp(symbol)
c = 1
substituted = integrand / u_func.diff(symbol)
substituted = substituted.subs(u_func, u_var)
if symbol not in substituted.free_symbols:
return URule(u_var, u_func, c,
integral_steps(substituted, u_var),
integrand, symbol)
partial_fractions_rule = rewriter(
lambda integrand, symbol: integrand.is_rational_function(),
lambda integrand, symbol: integrand.apart(symbol))
distribute_expand_rule = rewriter(
lambda integrand, symbol: (
all(arg.is_Pow or arg.is_polynomial(symbol) for arg in integrand.args)
or isinstance(integrand, sympy.Pow)
or isinstance(integrand, sympy.Mul)),
lambda integrand, symbol: integrand.expand())
def derivative_rule(integral):
variables = integral[0].args[1:]
if variables[-1] == integral.symbol:
return DerivativeRule(*integral)
else:
return ConstantRule(integral.integrand, *integral)
def rewrites_rule(integral):
integrand, symbol = integral
if integrand.match(1/sympy.cos(symbol)):
rewritten = integrand.subs(1/sympy.cos(symbol), sympy.sec(symbol))
return RewriteRule(rewritten, integral_steps(rewritten, symbol), integrand, symbol)
def fallback_rule(integral):
return DontKnowRule(*integral)
# Cache is used to break cyclic integrals
_integral_cache = {}
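# For example, repeated integration by parts on exp(x)*sin(x) leads back to the
# original integrand; while an integrand is in progress its cache entry is None
# (see integral_steps below), so the recursive call returns None and null_safe
# discards that cyclic path instead of recursing forever.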
def integral_steps(integrand, symbol, **options):
"""Returns the steps needed to compute an integral.
This function attempts to mirror what a student would do by hand as
closely as possible.
SymPy Gamma uses this to provide a step-by-step explanation of an
integral. The code it uses to format the results of this function can be
found at
https://github.com/sympy/sympy_gamma/blob/master/app/logic/intsteps.py.
Examples
========
>>> from sympy import exp, sin, cos
>>> from sympy.integrals.manualintegrate import integral_steps
>>> from sympy.abc import x
>>> print(repr(integral_steps(exp(x) / (1 + exp(2 * x)), x))) \
# doctest: +NORMALIZE_WHITESPACE
URule(u_var=_u, u_func=exp(x), constant=1,
substep=ArctanRule(context=1/(_u**2 + 1), symbol=_u),
context=exp(x)/(exp(2*x) + 1), symbol=x)
>>> print(repr(integral_steps(sin(x), x))) \
# doctest: +NORMALIZE_WHITESPACE
TrigRule(func='sin', arg=x, context=sin(x), symbol=x)
>>> print(repr(integral_steps((x**2 + 3)**2 , x))) \
# doctest: +NORMALIZE_WHITESPACE
RewriteRule(rewritten=x**4 + 6*x**2 + 9,
substep=AddRule(substeps=[PowerRule(base=x, exp=4, context=x**4, symbol=x),
ConstantTimesRule(constant=6, other=x**2,
substep=PowerRule(base=x, exp=2, context=x**2, symbol=x),
context=6*x**2, symbol=x),
ConstantRule(constant=9, context=9, symbol=x)],
context=x**4 + 6*x**2 + 9, symbol=x), context=(x**2 + 3)**2, symbol=x)
Returns
=======
rule : namedtuple
The first step; most rules have substeps that must also be
considered. These substeps can be evaluated using ``manualintegrate``
to obtain a result.
"""
cachekey = (integrand, symbol)
if cachekey in _integral_cache:
if _integral_cache[cachekey] is None:
# cyclic integral! null_safe will eliminate that path
return None
else:
return _integral_cache[cachekey]
else:
_integral_cache[cachekey] = None
integral = IntegralInfo(integrand, symbol)
def key(integral):
integrand = integral.integrand
if isinstance(integrand, TrigonometricFunction):
return TrigonometricFunction
elif isinstance(integrand, sympy.Derivative):
return sympy.Derivative
elif symbol not in integrand.free_symbols:
return sympy.Number
else:
for cls in (sympy.Pow, sympy.Symbol, sympy.exp, sympy.log,
sympy.Add, sympy.Mul, sympy.atan, sympy.asin, sympy.acos, sympy.Heaviside):
if isinstance(integrand, cls):
return cls
def integral_is_subclass(*klasses):
def _integral_is_subclass(integral):
k = key(integral)
return k and issubclass(k, klasses)
return _integral_is_subclass
result = do_one(
null_safe(switch(key, {
sympy.Pow: do_one(null_safe(power_rule), null_safe(inverse_trig_rule)),
sympy.Symbol: power_rule,
sympy.exp: exp_rule,
sympy.Add: add_rule,
sympy.Mul: do_one(null_safe(mul_rule), null_safe(trig_product_rule)),
sympy.Derivative: derivative_rule,
TrigonometricFunction: trig_rule,
sympy.Heaviside: heaviside_rule,
sympy.Number: constant_rule
})),
do_one(
null_safe(trig_rule),
null_safe(alternatives(
rewrites_rule,
substitution_rule,
condition(
integral_is_subclass(sympy.Mul, sympy.Pow),
partial_fractions_rule),
condition(
integral_is_subclass(sympy.Mul, sympy.log, sympy.atan, sympy.asin, sympy.acos),
parts_rule),
condition(
integral_is_subclass(sympy.Mul, sympy.Pow),
distribute_expand_rule),
trig_powers_products_rule
)),
null_safe(trig_substitution_rule)
),
fallback_rule)(integral)
del _integral_cache[cachekey]
return result
@evaluates(ConstantRule)
def eval_constant(constant, integrand, symbol):
return constant * symbol
@evaluates(ConstantTimesRule)
def eval_constanttimes(constant, other, substep, integrand, symbol):
return constant * _manualintegrate(substep)
@evaluates(PowerRule)
def eval_power(base, exp, integrand, symbol):
return (base ** (exp + 1)) / (exp + 1)
@evaluates(ExpRule)
def eval_exp(base, exp, integrand, symbol):
return integrand / sympy.ln(base)
@evaluates(AddRule)
def eval_add(substeps, integrand, symbol):
return sum(map(_manualintegrate, substeps))
@evaluates(URule)
def eval_u(u_var, u_func, constant, substep, integrand, symbol):
result = _manualintegrate(substep)
return result.subs(u_var, u_func)
@evaluates(PartsRule)
def eval_parts(u, dv, v_step, second_step, integrand, symbol):
v = _manualintegrate(v_step)
return u * v - _manualintegrate(second_step)
@evaluates(CyclicPartsRule)
def eval_cyclicparts(parts_rules, coefficient, integrand, symbol):
coefficient = 1 - coefficient
result = []
sign = 1
for rule in parts_rules:
result.append(sign * rule.u * _manualintegrate(rule.v_step))
sign *= -1
return sympy.Add(*result) / coefficient
@evaluates(TrigRule)
def eval_trig(func, arg, integrand, symbol):
if func == 'sin':
return -sympy.cos(arg)
elif func == 'cos':
return sympy.sin(arg)
elif func == 'sec*tan':
return sympy.sec(arg)
elif func == 'csc*cot':
return sympy.csc(arg)
elif func == 'sec**2':
return sympy.tan(arg)
elif func == 'csc**2':
return -sympy.cot(arg)
@evaluates(ReciprocalRule)
def eval_reciprocal(func, integrand, symbol):
return sympy.ln(func)
@evaluates(ArctanRule)
def eval_arctan(integrand, symbol):
return sympy.atan(symbol)
@evaluates(ArcsinRule)
def eval_arcsin(integrand, symbol):
return sympy.asin(symbol)
@evaluates(InverseHyperbolicRule)
def eval_inversehyperbolic(func, integrand, symbol):
return func(symbol)
@evaluates(AlternativeRule)
def eval_alternative(alternatives, integrand, symbol):
return _manualintegrate(alternatives[0])
@evaluates(RewriteRule)
def eval_rewrite(rewritten, substep, integrand, symbol):
return _manualintegrate(substep)
@evaluates(PiecewiseRule)
def eval_piecewise(substeps, integrand, symbol):
return sympy.Piecewise(*[(_manualintegrate(substep), cond)
for substep, cond in substeps])
@evaluates(TrigSubstitutionRule)
def eval_trigsubstitution(theta, func, rewritten, substep, integrand, symbol):
func = func.subs(sympy.sec(theta), 1/sympy.cos(theta))
trig_function = list(func.find(TrigonometricFunction))
assert len(trig_function) == 1
trig_function = trig_function[0]
relation = sympy.solve(symbol - func, trig_function)
assert len(relation) == 1
numer, denom = sympy.fraction(relation[0])
if isinstance(trig_function, sympy.sin):
opposite = numer
hypotenuse = denom
adjacent = sympy.sqrt(denom**2 - numer**2)
inverse = sympy.asin(relation[0])
elif isinstance(trig_function, sympy.cos):
adjacent = numer
hypotenuse = denom
opposite = sympy.sqrt(denom**2 - numer**2)
inverse = sympy.acos(relation[0])
elif isinstance(trig_function, sympy.tan):
opposite = numer
adjacent = denom
hypotenuse = sympy.sqrt(denom**2 + numer**2)
inverse = sympy.atan(relation[0])
substitution = [
(sympy.sin(theta), opposite/hypotenuse),
(sympy.cos(theta), adjacent/hypotenuse),
(sympy.tan(theta), opposite/adjacent),
(theta, inverse)
]
return _manualintegrate(substep).subs(substitution).trigsimp()
@evaluates(DerivativeRule)
def eval_derivativerule(integrand, symbol):
# isinstance(integrand, Derivative) should be True
if len(integrand.args) == 2:
return integrand.args[0]
else:
return sympy.Derivative(integrand.args[0], *integrand.args[1:-1])
@evaluates(HeavisideRule)
def eval_heaviside(arg, integrand, symbol):
# this result can also be represented as sympy.Max(0, arg)
return arg*sympy.Heaviside(arg)
@evaluates(DontKnowRule)
def eval_dontknowrule(integrand, symbol):
return sympy.Integral(integrand, symbol)
def _manualintegrate(rule):
evaluator = evaluators.get(rule.__class__)
if not evaluator:
raise ValueError("Cannot evaluate rule %s" % repr(rule))
return evaluator(*rule)
def manualintegrate(f, var):
"""manualintegrate(f, var)
Compute indefinite integral of a single variable using an algorithm that
resembles what a student would do by hand.
Unlike ``integrate``, var can only be a single symbol.
Examples
========
>>> from sympy import sin, cos, tan, exp, log, integrate
>>> from sympy.integrals.manualintegrate import manualintegrate
>>> from sympy.abc import x
>>> manualintegrate(1 / x, x)
log(x)
>>> integrate(1/x)
log(x)
>>> manualintegrate(log(x), x)
x*log(x) - x
>>> integrate(log(x))
x*log(x) - x
>>> manualintegrate(exp(x) / (1 + exp(2 * x)), x)
atan(exp(x))
>>> integrate(exp(x) / (1 + exp(2 * x)))
RootSum(4*_z**2 + 1, Lambda(_i, _i*log(2*_i + exp(x))))
>>> manualintegrate(cos(x)**4 * sin(x), x)
-cos(x)**5/5
>>> integrate(cos(x)**4 * sin(x), x)
-cos(x)**5/5
>>> manualintegrate(cos(x)**4 * sin(x)**3, x)
cos(x)**7/7 - cos(x)**5/5
>>> integrate(cos(x)**4 * sin(x)**3, x)
cos(x)**7/7 - cos(x)**5/5
>>> manualintegrate(tan(x), x)
-log(cos(x))
>>> integrate(tan(x), x)
-log(sin(x)**2 - 1)/2
See Also
========
sympy.integrals.integrals.integrate
sympy.integrals.integrals.Integral.doit
sympy.integrals.integrals.Integral
"""
return _manualintegrate(integral_steps(f, var))
| bsd-3-clause | -7,936,689,559,096,110,000 | 35.01448 | 99 | 0.585561 | false |
sipdbg/sipdbg | logger.py | 1 | 1803 | import sys
import logging
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color = True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace(
"$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
GREY, RED, GREEN, YELLOW, BLUE, PURPLE, AZUR, WHITE, BLACK = range (9)
COLORS = {
'DEBUG' : YELLOW,
'INFO' : GREEN,
'WARNING' : RED,
'ERROR' : BLACK,
'CRITICAL' : BLACK
}
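# Illustration: COLOR_SEQ % (30 + COLORS['INFO']) expands to '\033[1;32m', the
# ANSI escape for bold green, so INFO records get a green level marker below.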
class ColoredFormatter (logging.Formatter):
def __init__ (self, msg, use_color = True):
logging.Formatter.__init__ (self, msg)
self.use_color = use_color
def format (self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS [levelname]) + levelname [:1] + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format (self, record)
class ColoredLogger (logging.Logger):
FORMAT = "[%(levelname)s] %(message)s"
COLOR_FORMAT = formatter_message (FORMAT, True)
def __init__ (self, name):
logging.Logger.__init__ (self, name, logging.INFO)
color_formatter = ColoredFormatter (self.COLOR_FORMAT)
console = logging.StreamHandler (sys.stdout)
console.setFormatter (color_formatter)
self.addHandler (console)
return
if '__main__' == __name__:
logging.setLoggerClass (ColoredLogger)
logger = ColoredLogger ("MyTestLogger")
logger.debug ("debugmsg")
logger.info ("infomsg")
logger.warn ("warnmsg")
logger.error ("errormsg")
# http://docs.python.org/2/library/logging.handlers.html#memoryhandler
| gpl-2.0 | -2,628,304,185,319,130,000 | 31.196429 | 96 | 0.621742 | false |
Hellowlol/PyTunes | pytunes/server.py | 1 | 4798 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Initiate the HTTP server according to settings """
import os
import sys
import cherrypy
import pytunes
import logging
from cherrypy.process.plugins import Daemonizer, PIDFile
from cherrypy.lib.auth_digest import get_ha1_dict_plain
def start():
""" Main function for starting PyTunes server """
logger = logging.getLogger('pytunes.server')
logger.debug("Setting up to start cherrypy")
ssl = ''
secure = ''
# Set server ip, port and root
cherrypy.config.update({
'server.socket_host': pytunes.HOST,
'server.socket_port': pytunes.PORT,
'log.screen': False,
'server.thread_pool': 30,
'server.socket_queue_size': 30,
'server.request_queue_size': 50
})
# Set server environment to production unless when debugging
if not pytunes.DEBUG:
cherrypy.config.update({
'environment': 'production'
})
# Enable SSL
if pytunes.SSLCERT and pytunes.SSLKEY:
#cert_dir = os.path.join(pytunes.RUNDIR, "userdata/")
print os.path.join(pytunes.RUNDIR, "userdata/", pytunes.SSLCERT)
ssl = 's'
secure = 'Secure '
cherrypy.config.update({
'server.ssl_module': 'builtin',
'server.ssl_certificate': os.path.join(pytunes.RUNDIR, "userdata/", pytunes.SSLCERT),
'server.ssl_private_key': os.path.join(pytunes.RUNDIR, "userdata/", pytunes.SSLKEY)
})
# Daemonize cherrypy if specified
if pytunes.DAEMON:
if sys.platform == 'win32':
logger.error("You are using Windows - I cannot setup daemon mode. Please use the pythonw executable instead.")
logger.error("More information at http://docs.python.org/2/using/windows.html.")
else:
Daemonizer(cherrypy.engine).subscribe()
# Create PID if specified
if pytunes.PID:
PIDFile(cherrypy.engine, pytunes.PID).subscribe()
# Set static directories
webdir = os.path.join(pytunes.RUNDIR, pytunes.TEMPLATE)
favicon = os.path.join(webdir, "img/favicon.ico")
app_config = {
'/': {
'tools.staticdir.root': webdir,
'tools.staticdir.dir': 'static',
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8',
'tools.gzip.on': True,
'tools.gzip.mime_types': ['text/html', 'text/plain', 'text/css', 'text/javascript', 'application/json', 'application/javascript']
},
'/js': {
'tools.caching.on': True,
'tools.caching.force': True,
'tools.caching.delay': 0,
'tools.expires.on': True,
'tools.expires.secs': 60 * 60 * 6,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'js'
},
'/css': {
'tools.caching.on': True,
'tools.caching.force': True,
'tools.caching.delay': 0,
'tools.expires.on': True,
'tools.expires.secs': 60 * 60 * 6,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'css'
},
'/img': {
'tools.caching.on': True,
'tools.caching.force': True,
'tools.caching.delay': 0,
'tools.expires.on': True,
'tools.expires.secs': 60 * 60 * 6,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'img'
},
'/favicon.ico': {
'tools.caching.on': True,
'tools.caching.force': True,
'tools.caching.delay': 0,
'tools.expires.on': True,
'tools.expires.secs': 60 * 60 * 6,
'tools.staticfile.on': True,
'tools.staticfile.filename': favicon
},
}
# Require username and password if they are set
if pytunes.USERNAME and pytunes.PASSWORD:
logger.info("Enabling username/password access")
userpassdict = {pytunes.USERNAME: pytunes.PASSWORD}
get_ha1 = get_ha1_dict_plain(userpassdict)
app_config['/'].update({
'tools.auth_digest.on': True,
'tools.auth_digest.realm': "PyTunes",
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb'
})
# Start the CherryPy server (remove trailing slash from webdir)
logger.info("Starting up webserver")
print '******************************************************'
print 'Starting Pytunes on %sPort %s.' % (secure, str(pytunes.PORT))
print 'Start your browser and go to http%s://localhost:%s/%s' % (ssl, str(pytunes.PORT), pytunes.WEBDIR[:-1])
print '******************************************************'
cherrypy.quickstart(pytunes.ROOT, pytunes.WEBDIR[:-1], config=app_config)
| gpl-3.0 | 3,988,609,484,609,822,000 | 36.193798 | 141 | 0.566069 | false |
forrestgtran/TeamX | handwritingRecognition/performRecognitionAlpha.py | 1 | 2865 | # Import the modules
import cv2
from sklearn.externals import joblib
from skimage.feature import hog
import numpy as np
from PIL import Image
# Load the classifier
clf = joblib.load("digits_cls_alpha6.pkl")
# Read the input image
im = cv2.imread("adrienne2b.jpg")
im_pil = Image.open("adrienne2b.jpg")
# Convert to grayscale and apply Gaussian filtering
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
# Threshold the image
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
# Find contours in the image
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Get rectangles contains each contour
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
# store digit/alpha bounding boxes as 4-tuples (left, upper, right, lower),
# the box format expected by PIL's Image.crop()
digit_alphabet_bounding_boxes = []
# cropped_images = []
# For each rectangular region, calculate HOG features and predict
# the digit using Linear SVM.
for rect in rects:
# Draw the rectangles
cv2.rectangle(im, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
v1 = (rect[0], rect[1])
v2 = (rect[0] + rect[2], rect[1] + rect[3])
# append bounding box
# digit_alphabet_bounding_boxes.append((v1,v2))
print "v1"
print rect[0]
print rect[1]
print " - - - "
print "v2"
print rect[0] + rect[2]
print rect[1] + rect[3]
print " - - - "
print "rect[0]", rect[0]
print "rect[1]", rect[1]
print "rect[2]", rect[2]
print "rect[3]", rect[3]
box = (rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3])
digit_alphabet_bounding_boxes.append(box)
# Make the rectangular region around the digit
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = im_th[pt1:pt1+leng, pt2:pt2+leng]
# Resize the image
roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
roi = cv2.dilate(roi, (3, 3))
# Calculate the HOG features
roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
nbr = clf.predict(np.array([roi_hog_fd], 'float64'))
cv2.putText(im, str(nbr[0]), (rect[0], rect[1]),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 255), 3)
print "# # # # IDENTIFIED ITEM", str(nbr[0])
# ^ ^ IDENTIFIED NUMBER = str(int(nbr[0]))
digit_alphabet_bounding_boxes2 = sorted(digit_alphabet_bounding_boxes, key=lambda x: x[0])
i=0
for item in digit_alphabet_bounding_boxes2:
temp_region = im_pil.crop(item)
temp_str = 'jeremy2region' + str(i)
temp_region.save(temp_str, 'jpeg')
i += 1
cv2.imshow("Resulting Image with Rectangular ROIs", im)
cv2.waitKey()
# input: Image
# output: number
| apache-2.0 | -8,676,434,699,366,484,000 | 27.65 | 108 | 0.647469 | false |
rporter/verilog_integration | test/test_vpi.py | 1 | 3938 | # Copyright (c) 2012, 2013 Rich Porter - see LICENSE for further details
import message
import random
import test
import verilog
################################################################################
message.control.ERROR.threshold = 100
# use verilog callback on each clock
class cbClk(verilog.callback) :
ITERATIONS=200
class assign(object) :
types = {
True : [verilog.vpiBinStr, verilog.vpiOctStr, verilog.vpiHexStr],
False : [verilog.vpiBinStr, verilog.vpiOctStr, verilog.vpiHexStr, verilog.vpiDecStr]
}
def __init__(self, size, scope) :
self.size = size
self.scope = scope
self.mask = (1L << size)-1L
def get(self) :
if not hasattr(self, 'bits') : return
# cross these choices
sig0 = self.scope.sig0.get_value(self.choice())
sig1 = self.scope.sig1.get_value(self.choice())
self.check(sig0, sig1)
def check(self, sig0, sig1) :
if long(sig0) != long(sig1) :
message.error("sig0(%x, %s) != sig1(%x, %s) when value(%d'h%x)" % (long(sig0), repr(sig0), long(sig1), repr(sig1), self.size, self.bits))
if long(sig0) != self.val :
message.error("sig0(%x, %s) != value(%x) when value(%d'h%x)" % (long(sig0), repr(sig0), self.bits, self.size, self.bits))
if long(sig1) != self.val :
message.error("sig1(%x, %s) != value(%x) when value(%d'h%x)" % (long(sig1), repr(sig1), self.bits, self.size, self.bits))
def put(self) :
self.scope.direct.sig0 = self.value(self.rand())
self.scope.direct.sig1 = self.value()
def value(self, bits=None) :
if bits is None : bits = self.bits
choice = self.choice()
if choice == verilog.vpiDecStr :
# can't put too many bits in else > MAXINT is used
bits &= self.mask
return choice(bits)
def choice(self) :
return random.choice(self.types[self.size > 32])
def rand(self) :
# choose either the vector length or something else
size = random.choice((self.size, random.choice(range(1,300))))
self.bits = random.getrandbits(size)
return self.bits
@property
def val(self) :
return self.bits & self.mask
def __init__(self, obj, simctrl, array) :
verilog.callback.__init__(self, name='clock callback', obj=obj, reason=verilog.callback.cbValueChange, func=self.execute)
self.blks = [self.fcty(*l) for l in array.iteritems()]
self.simctrl = simctrl
self.count = 0
def execute(self) :
for blk in self.blks :
if self.count & 1 :
blk.get()
else :
blk.put()
self.count += 1
if self.count == self.ITERATIONS :
# stop
self.simctrl.direct.sim_ctrl_finish_r = 1
def cb_filter(self) :
# ignore rising edge
return not int(self.obj)
def fcty(self, *args) :
'object factory'
return self.assign(*args)
################################################################################
class test_vpi(test.test) :
name='test vpi'
MAX_INSTS=255
TIMEOUT=200
def prologue(self) :
# initialize random seed with deterministic value
seed = test.plusargs().get('seed', 1)
random.seed(seed)
simctrl = verilog.scope('example.simctrl_0_u')
arr = dict([(i, verilog.scope('example.duv_0_u.arr[%d].arr' % i)) for i in range(1,self.MAX_INSTS)])
# up timeout beyond test time
simctrl.direct.sim_ctrl_timeout_i = self.TIMEOUT
# reduce time step
simctrl.direct.sim_ctrl_cycles_freq_i = 1
for scope in arr.values() :
scope.direct.verbose = 0 # display values
# register call back
cbClk0 = self.cb_fcty(simctrl.sim_ctrl_clk_op.set_type(verilog.vpiInt), simctrl, arr)
def epilogue(self) :
self.success()
def cb_fcty(self, *args) :
'object factory'
return cbClk(*args)
################################################################################
if __name__ == '__main__' :
testing = test_vpi()
| mit | -1,703,411,440,200,153,000 | 30.253968 | 145 | 0.584307 | false |
8l/luz-cpu | luz_asm_sim/lib/simlib/interactive_cli.py | 1 | 4504 | # Interactive command-line simulation functions
#
# Luz micro-controller simulator
# Eli Bendersky (C) 2008-2010
#
import sys
from .luzsim import LuzSim
from ..asmlib.disassembler import disassemble
from ..asmlib.asm_instructions import register_alias_of
from ..commonlib.utils import word2bytes
from ..commonlib.portability import printme, get_input
def print_regs(sim, replace_alias=True):
for i in range(32):
if replace_alias:
regname = register_alias_of[i]
else:
regname = "$r%s" % i
printme('%-5s = 0x%08X' % (regname, sim.reg_value(i)))
if i % 4 == 3:
printme('\n')
else:
printme(' ')
printme('\n')
def do_step(sim):
instr_word = sim.memory.read_instruction(sim.pc)
sim.step()
def show_memory(sim, addr):
for linenum in range(4):
printme("0x%08X: " % (addr + linenum * 16,))
for wordnum in range(4):
waddr = addr + linenum * 16 + wordnum * 4
memword = sim.memory.read_mem(waddr, width=4)
bytes = word2bytes(memword)
for b in bytes:
printme("%02X" % b)
printme(' ')
printme('\n')
help_message = r'''
Supported commands:
s [nsteps] Single step. If 'nsteps' is specified, then 'nsteps'
steps are done.
r Print the contents of all registers
sr Single step and print the contents of all registers
m <addr> Show memory contents at <addr>
rst Restart the simulator
? or help Print this help message
q Quit the simulator
set <param> <value>
Set parameter value (see next section)
Parameters:
alias 1 to show alias names of registers, 0 to show plain
register names.
'''
def print_help():
printme(help_message + '\n')
def interactive_cli_sim(img):
""" An interactive command-line simulation.
img: Executable image
"""
sim = LuzSim(img)
printme('\nLUZ simulator started at 0x%08X\n\n' % sim.pc)
params = {
'alias': True,
}
while True:
try:
# show the current instruction
instr_disasm = disassemble(
word=sim.memory.read_instruction(sim.pc),
replace_alias=params['alias'])
# get a command from the user
line = get_input('[0x%08X] [%s] >> ' % (sim.pc, instr_disasm)).strip()
# skip empty lines
if not line.strip():
continue
cmd, args = parse_cmd(line)
if cmd == 's':
if len(args) >= 1:
nsteps = int(args[0])
else:
nsteps = 1
for i in range(nsteps):
do_step(sim)
elif cmd == 'q':
return
elif cmd == 'rst':
sim.restart()
printme('Restarted\n')
elif cmd == 'r':
print_regs(sim, replace_alias=params['alias'])
elif cmd == 'sr':
do_step(sim)
print_regs(sim, replace_alias=params['alias'])
elif cmd == 'm':
addr = args[0]
show_memory(sim, eval(addr))
elif cmd == 'set':
if len(args) != 2:
printme("Error: invalid command\n")
continue
param, value = args[0], args[1]
if param in params:
params[param] = eval(value)
else:
printme("Error: no such parameter '%s'\n" % param)
elif cmd == '?' or cmd == 'help':
print_help()
else:
printme('Unknown command. To get some help, type ? or help\n')
except Exception:
e = sys.exc_info()[1]
printme('\n!!ERROR!!: %s %s\n' % (type(e), str(e)))
def parse_cmd(line):
""" Parses a command
"""
tokens = [t.strip() for t in line.split()]
return tokens[0], tokens[1:]
| unlicense | -4,718,183,565,284,314,000 | 26.687898 | 82 | 0.461146 | false |
geonexus/fiware-cloto | fiware_cloto/cloto/tests/acceptance_tests/component/tenant_information/features/tenant_information.py | 1 | 4559 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]
#
__author__ = 'arobres'
# -*- coding: utf-8 -*-
from lettuce import step, world, before
from commons.rest_utils import RestUtils
from commons.constants import TENANT_DOC, TENANT_OWNER, TENANT_VERSION, TENANT_WSIZE, TENANT_DEFAULT_DOC
from commons.configuration import HEADERS, TENANT_ID
import commons.utils as Utils
import commons.authentication as Auth
api_utils = RestUtils()
@before.each_feature
def setup_feature(feature):
token_id, world.tenant_id = Auth.get_token()
HEADERS['X-Auth-Token'] = token_id
@before.each_scenario
def setup(scenario):
#Set default headers with correct token before every scenario
world.headers = HEADERS
@step(u'the tenant "([^"]*)"')
def set_tenant_id(step, tenant_id):
world.tenant_id = tenant_id
@step(u'created tenant')
def set_default_tenant(step):
#Set default tenant_id as a global variable
world.tenant_id = TENANT_ID
@step(u'I retrieve the tenant information')
def retrieve_tenant_information(step):
world.req = api_utils.retrieve_information(tenant_id=world.tenant_id, headers=world.headers)
@step(u'I get the following information:')
def check_tenant_information(step):
assert world.req.ok, 'Invalid HTTP status code. Status Code obtained is: {}'.format(world.req.status_code)
response = Utils.assert_json_format(world.req)
for expected_result in step.hashes:
assert response[TENANT_DOC] == TENANT_DEFAULT_DOC, 'Expected {} is: {} \n Obtained {} is: ' \
'{}'.format(TENANT_DOC, TENANT_DEFAULT_DOC,
TENANT_DOC, response[TENANT_DOC])
assert response[TENANT_OWNER] == expected_result[TENANT_OWNER], 'Expected {} is: {} \n Obtained {} is: ' \
'{}'.format(TENANT_OWNER,
expected_result[TENANT_OWNER],
TENANT_OWNER,
response[TENANT_OWNER])
assert TENANT_VERSION in response, 'API Version not found in the response'
assert TENANT_WSIZE in response, 'WindowSize value not found in the API response'
@step(u'I obtain an "([^"]*)" and the "([^"]*)"')
def assert_error_response(step, error_code, fault_element):
Utils.assert_error_code_error(response=world.req, expected_error_code=error_code,
expected_fault_element=fault_element)
@step(u'incorrect "([^"]*)"')
def set_incorrect_token(step, token):
#Set and incorrect header to obtain unauthorized error
world.headers = Utils.create_header(token=token)
@step(u'I update the "([^"]*)"')
def update_window_size(step, window_size):
try:
world.window_size = int(window_size)
except ValueError:
print 'Window Size can not be converted to integer'
world.window_size = window_size
world.req = api_utils.update_window_size(tenant_id=world.tenant_id, window_size=world.window_size,
headers=world.headers)
@step(u'the "([^"]*)" is update in Policy Manager')
def assert_window_size(step, window_size):
assert world.req.ok, str(world.req.status_code) + world.req.content
response = Utils.assert_json_format(world.req)
assert str(response[TENANT_WSIZE]) == window_size
world.req = api_utils.retrieve_information(tenant_id=world.tenant_id, headers=world.headers)
response = Utils.assert_json_format(world.req)
assert str(response[TENANT_WSIZE]) == window_size
| apache-2.0 | -3,880,594,550,653,137,000 | 33.263158 | 114 | 0.633531 | false |
ingolemo/cmd-utils | timepick.py | 1 | 1388 | #!/usr/bin/env python
"""Usage: timepick [second|minute|hour|day|week]
A filter that picks a single line from stdin based on the current time.
This has an advantage over random selection in that it's cyclical, so the
selections never cluster or repeat early; this tends to feel 'more random'
to people.
"""
import os
import sys
import datetime
pdict = {
"second": lambda a: (a.days * 24 * 60 * 60) + a.seconds,
"minute": lambda a: (a.days * 24 * 60) + (a.seconds / 60),
"hour": lambda a: (a.days * 24) + (a.seconds / 3600),
"day": lambda a: a.days,
"week": lambda a: a.days / 7,
}
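# Worked example: for a timedelta of 2 days and 7200 seconds, 'hour' gives
# 2*24 + 7200/3600 = 50 and 'minute' gives 2*24*60 + 7200/60 = 3000.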
def numSinceEpoch(period):
td = datetime.datetime.now() - datetime.datetime.fromtimestamp(0)
return abs(int(pdict[period](td)))
def main(argv):
if not argv[1:] or "-h" in argv or "--help" in argv:
return __doc__
try:
div = argv[1]
if div.endswith("s"):
div = div[:-1]
except IndexError:
div = object()
if div not in pdict:
return "usage: {0} [{1}]".format(
os.path.basename(argv[0]), "|".join(sorted(pdict.keys()))
)
choices = sys.stdin.readlines()
try:
lineno = numSinceEpoch(div) % len(choices)
except ZeroDivisionError:
pass
else:
choice = choices[lineno].strip()
print(choice)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-3.0 | -4,550,585,445,860,526,000 | 23.785714 | 71 | 0.59366 | false |
mozilla/kuma | kuma/search/tests/__init__.py | 1 | 2536 |
from django.conf import settings
from elasticsearch.exceptions import ConnectionError
from elasticsearch_dsl.connections import connections
from rest_framework.test import APIRequestFactory
from kuma.core.i18n import activate_language_from_request
from kuma.users.tests import UserTestCase
from kuma.wiki.search import WikiDocumentType
from ..models import Index
factory = APIRequestFactory()
class ElasticTestCase(UserTestCase):
"""Base class for Elastic Search tests, providing some conveniences"""
@classmethod
def setUpClass(cls):
super(ElasticTestCase, cls).setUpClass()
if not getattr(settings, 'ES_URLS', None):
cls.skipme = True
return
try:
connections.get_connection().cluster.health()
except ConnectionError:
cls.skipme = True
return
cls._old_es_index_prefix = settings.ES_INDEX_PREFIX
settings.ES_INDEX_PREFIX = 'test-%s' % settings.ES_INDEX_PREFIX
cls._old_es_live_index = settings.ES_LIVE_INDEX
settings.ES_LIVE_INDEX = True
@classmethod
def tearDownClass(cls):
super(ElasticTestCase, cls).tearDownClass()
if not cls.skipme:
# Restore old setting.
settings.ES_INDEX_PREFIX = cls._old_es_index_prefix
settings.ES_LIVE_INDEX = cls._old_es_live_index
def setUp(self):
super(ElasticTestCase, self).setUp()
self.setup_indexes()
def tearDown(self):
super(ElasticTestCase, self).tearDown()
self.teardown_indexes()
def refresh(self, index=None):
index = index or Index.objects.get_current().prefixed_name
# Any time we're doing a refresh, we're making sure that the
# index is ready to be queried. Given that, it's almost
# always the case that we want to run all the generated tasks,
# then refresh.
connections.get_connection().indices.refresh(index=index)
def setup_indexes(self):
"""Clear and repopulate the current index."""
WikiDocumentType.reindex_all()
def teardown_indexes(self):
es = connections.get_connection()
for index in Index.objects.all():
# Ignore indices that do not exist.
es.indices.delete(index.prefixed_name, ignore=[404])
def get_request(self, *args, **kwargs):
request = factory.get(*args, **kwargs)
# setting request.LANGUAGE_CODE correctly
activate_language_from_request(request)
return request
| mpl-2.0 | -8,934,910,198,099,170,000 | 31.101266 | 74 | 0.6597 | false |
xantage/code | vilya/libs/text.py | 1 | 2278 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import urllib
import hashlib
from mikoto.libs.text import *
from mikoto.libs.emoji import *
from vilya.config import EMAIL_SUFFIX
def trunc_utf8(string, num, etc="..."):
"""truncate a utf-8 string, show as num chars.
arg: string, a utf-8 encoding string; num, look like num chars
return: a utf-8 string
"""
try:
gb = string.decode("utf8", "ignore")
except UnicodeEncodeError: # Already decoded
gb = string
gb = gb.encode("gb18030", "ignore")
if num >= len(gb):
return string
if etc:
etc_len = len(etc.decode("utf8", "ignore").encode("gb18030", "ignore"))
trunc_idx = num - etc_len
else:
trunc_idx = num
ret = gb[:trunc_idx].decode("gb18030", "ignore").encode("utf8")
if etc:
ret += etc
return ret
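# Illustration with the default etc="...": trunc_utf8("hello world", 8) keeps
# 8 - 3 = 5 display chars and returns "hello...". CJK characters count as two
# display chars each, since widths are measured in gb18030 bytes.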
EMAILRE = re.compile(
r'^[_\.0-9a-zA-Z+-]+@([0-9a-zA-Z]+[0-9a-zA-Z-]*\.)+[a-zA-Z]{2,4}$')
def _validate_email(email):
if not email:
return False
if len(email) >= 6:
return EMAILRE.match(email) is not None
return False
# FIXME: bad smell, useful ?
def email_normalizer(name, email):
if _validate_email(email):
return email
else:
return name + '@' + EMAIL_SUFFIX
def is_image(fname):
return bool(RE_IMAGE_FILENAME.match(fname))
def is_binary(fname):
ext = fname.split('.')
if ext is None:
return False
if len(ext) == 1:
return ext[0] not in SOURCE_FILE
ext = '.' + ext[-1]
if ext in IS_GENERATED:
return False
if ext in IGNORE_FILE_EXTS or ext not in (SOURCE_FILE + NOT_GENERATED):
return True
return False
def gravatar_url(email, size=140):
default = "http://img3.douban.com/icon/user_normal.jpg"
url = "http://douvatar.dapps.douban.com/mirror/" + hashlib.md5(
email.encode('utf8').lower()).hexdigest() + "?"
url += urllib.urlencode({'d': default, 's': str(size), 'r': 'x'})
return url
def remove_unknown_character(text):
if isinstance(text, str):
return text.decode('utf-8', 'ignore').encode('utf-8', 'ignore')
return text
def plural(count, single, plural):
if count <= 1:
return single
else:
return plural
| bsd-3-clause | 6,086,639,086,148,238,000 | 24.595506 | 79 | 0.598332 | false |
mitsei/dlkit | dlkit/handcar/osid/osid_errors.py | 1 | 2720 | # -*- coding: utf-8 -*-
# This module contains all the osid_error exception classes and can be
# used by any python implementations that need to throw an osid error
"""
Errors are specified in each method specification. Only the specified
errors are permitted as error conditions in OSID method contracts with
exceptions noted in the descriptions below. Provider Contract errors are
never specified but may be returned at any time by an OSID Runtime. An
OSID Provider implementation is only permitted to return those errors
specified in a method contract however, some Consumer Contract errors may
be automatically handled in an OSID Runtime. Errors should result when the
contract of the interface has been violated or cannot be fulfilled and it
is necessary to disrupt the flow of control for an OSID Consumer. Different
errors are specified where it is foreseen that an OSID Consumer may wish
to execute a different action without violating the encapsulation of
internal OSID Provider operations. Such actions do not include debugging
or other detailed information which is the responsibility of the OSID
Provider to manage. As such, the number of errors defined across all
the interfaces is kept to a minimum and the context of the error may vary
from method to method in accordance with the OSID specification.
"""
from ...abstract_osid.osid import errors as abc_errors
""" User Errors:
User errors are only permitted where specified in method signatures and
should be handled directly by a consumer application.
"""
class AlreadyExists(abc_errors.AlreadyExists):
pass
class NotFound(abc_errors.NotFound):
pass
class PermissionDenied(abc_errors.PermissionDenied):
pass
""" Operational Errors:
Operational errors result from failures in the system. These errors are
only permitted where specified and should be handled directly by the
consumer application.
"""
class ConfigurationError(abc_errors.ConfigurationError):
pass
class OperationFailed(abc_errors.OperationFailed):
pass
class TransactionFailure(abc_errors.TransactionFailure):
pass
""" ConsumerContract:
Errors in programming resulting from an incorrect use of the OSIDs.
Application code should be checked for accuracy. These errors should be
avoided by using the defined interoperability and flow control tests.
"""
class IllegalState(abc_errors.IllegalState):
pass
class InvalidArgument(abc_errors.InvalidArgument):
pass
class InvalidMethod(abc_errors.InvalidMethod):
pass
class NoAccess(abc_errors.NoAccess):
pass
class NullArgument(abc_errors.NullArgument):
pass
class Unimplemented(abc_errors.Unimplemented):
pass
class Unsupported(abc_errors.Unsupported):
pass
| mit | 6,822,133,982,144,225,000 | 25.930693 | 75 | 0.792279 | false |
ThomasTheSpaceFox/Desutezeoid | dzulib1.py | 1 | 6392 | #!/usr/bin/env python
import pygame.event
import pygame.key
import pygame.display
import pygame.image
import pygame.mixer
import pygame
import sys
import time
import os
pygame.display.init()
#this library contains functions and data needed by Desutezeoid
#that don't need to be in the actual engine executable.
#some functions do rely on variables only present within DZU-ENG1.py, however.
print ("dzulib initialized")
#initial main.sav file structure
savtree='''<?xml version="1.0" encoding="UTF-8"?>
<sav>
<keysav>
</keysav>
<plugsav>
</plugsav>
<pagelink/>
</sav>
'''
#main.sav init.
def initmainsave():
print ('Initalize main.sav')
mainsavfile = open(os.path.join("save", 'autosave.sav'), 'w')
mainsavfile.write(savtree)
mainsavfile.close()
def definepluginlist(pluglist):
global pluglistactive
pluglistactive=pluglist
#image scrollers
def vscroll(scrollval, image):
offs=image.get_height()
newimage=image.copy()
newimage.fill((0, 0, 0, 0))
newimage.blit(image, (0, scrollval))
if (str(scrollval))[0]=="-":
newimage.blit(image, (0, (scrollval + offs)))
else:
newimage.blit(image, (0, (scrollval - offs)))
return newimage
def hscroll(scrollval, image):
offs=image.get_width()
newimage=image.copy()
newimage.fill((0, 0, 0, 0))
newimage.blit(image, (scrollval, 0))
if (str(scrollval))[0]=="-":
newimage.blit(image, ((scrollval + offs), 0))
else:
newimage.blit(image, ((scrollval - offs), 0))
return newimage
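# Example use: vscroll(10, image) shifts the image down 10 pixels and wraps the
# rows pushed off the bottom back onto the top, so calling it with a steadily
# increasing offset produces a seamless looping scroll.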
imagepath='img'
filedict={}
textdict={}
#image alpha optimization detection.
def imagealphaoff(filename):
if (filename.lower()).endswith(".jpg") or (filename.lower()).endswith(".jpeg") or (filename.lower()).startswith("no-tr"):
return 1
else:
return 0
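# e.g. imagealphaoff("photo.jpg") returns 1, so filelookup loads it opaque via
# convert(); imagealphaoff("sprite.png") returns 0 and per-pixel alpha is kept
# with convert_alpha(). Names starting with "no-tr" also skip the alpha channel.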
dummyimage=pygame.Surface((48, 48))
dummyimage.fill((255, 0, 255))
def filelookup(filename, bypasscache=False,):
global filedict
if filename in filedict and bypasscache==False:
return filedict[filename]
else:
try:
if imagealphaoff(filename):
imgret=pygame.image.load(os.path.join(imagepath, filename)).convert()
#print "noalpha"
else:
imgret=pygame.image.load(os.path.join(imagepath, filename)).convert_alpha()
#print "alpha"
filedict[filename]=imgret
return imgret
except pygame.error:
#if error, check if any plugin's imageloader API functions understand it.
for plug in pluglistactive:
#plugin imageloader API.
try:
imgret=plug.imageloader(filename)
if imgret!=None:
#cache result of imageloader call.
filedict[filename]=imgret
return imgret
except AttributeError:
continue
#nocache variant.
try:
#nocache version of imageloader.
imgret=plug.imageloader_nocache(filename)
if imgret!=None:
return imgret
except AttributeError:
continue
#if all else fails print error message and return dummy image.
print("IMAGE FILENAME ERROR: nonvalid image filename. returning dummy image...")
return dummyimage
#convenience function.
#give it a color, be it an rgb tuple,
# html hex, or pygame color object, and it will always spit out a pygame color object.
def colorify(colorobj):
if type(colorobj) is pygame.Color:
return colorobj
else:
return pygame.Color(colorobj)
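# Small illustration: colorify("#ff0000") and colorify(pygame.Color(255, 0, 0))
# both come back as the same pygame.Color, so callers may pass either form.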
def textrender(text, size, fgcolor, bgcolor, transp):
#ensure colors are pygame.Color objects
fgcolor=colorify(fgcolor)
bgcolor=colorify(bgcolor)
#generate string forms of fg and bg colors for key.
kfgcolor=str(fgcolor.r)+str(fgcolor.g)+str(fgcolor.b)
kbgcolor=str(bgcolor.r)+str(bgcolor.g)+str(bgcolor.b)
global textdict
keyx=(text + str(size) + kfgcolor + kbgcolor + str(transp))
if keyx in textdict:
return textdict[keyx]
else:
texfnt=pygame.font.SysFont(None, size)
if transp==0:
texgfx=texfnt.render(text, True, fgcolor, bgcolor)
else:
texgfx=texfnt.render(text, True, fgcolor)
textdict[keyx]=texgfx
return texgfx
class clicktab:
def __init__(self, box, reftype, ref, keyid, takekey, sfxclick, sound, quitab=0, data=None):
self.box=box
self.ref=ref
self.keyid=keyid
self.takekey=takekey
self.reftype=reftype
self.sfxclick=sfxclick
self.sound=sound
self.quitab=quitab
self.data=data
def ctreport(box, selfref, dataval):
return clicktab(box, 'report', selfref, '0', '0', 0, None, quitab=0, data=dataval)
def colorboost(colorobj, amnt):
colorobj=colorify(colorobj)
rcol=colorobj.r
gcol=colorobj.g
bcol=colorobj.b
rcol+=amnt
gcol+=amnt
bcol+=amnt
if rcol>255:
rcol=255
if rcol<0:
rcol=0
if gcol>255:
gcol=255
if gcol<0:
gcol=0
if bcol>255:
bcol=255
if bcol<0:
bcol=0
return pygame.Color(rcol, gcol, bcol)
def trace3dbox(surface, basecolor, rect, linewidth=1):
basetint=colorboost(basecolor, 40)
baseshad=colorboost(basecolor, -40)
pygame.draw.line(surface, basetint, rect.topleft, rect.topright, linewidth)
pygame.draw.line(surface, basetint, rect.topleft, rect.bottomleft, linewidth)
pygame.draw.line(surface, baseshad, rect.bottomleft, rect.bottomright, linewidth)
pygame.draw.line(surface, baseshad, rect.topright, rect.bottomright, linewidth)
def colorchanlimit(color):
if color>255:
return 255
elif color<0:
return 0
else:
return color
#non-alpha 2-color gradient function. outputs a 200x200 surface, use rotval 0 for no rotation.
#rotation values of non-90-degree increments will cause the returned surface to be LARGER than 200x200.
def makegradient(startcolor, endcolor, rotval):
#print startcolor
#print endcolor
gradsurf = pygame.Surface((200, 200))
startcolor = colorify(startcolor)
endcolor = colorify(endcolor)
#calculate float increment values for each color channel
inccolorR = (startcolor.r - endcolor.r) / 200.0
inccolorG = (startcolor.g - endcolor.g) / 200.0
inccolorB = (startcolor.b - endcolor.b) / 200.0
#initalize float color data storage values
startcolorR = startcolor.r
startcolorG = startcolor.g
startcolorB = startcolor.b
colcnt = 0
#draw gradient
while colcnt < 200:
#draw horizontal line
pygame.draw.line(gradsurf, startcolor, (0, colcnt), (200, colcnt))
startcolorR -= inccolorR
startcolorG -= inccolorG
startcolorB -= inccolorB
#update color channels
startcolor.r = colorchanlimit(int(startcolorR))
startcolor.g = colorchanlimit(int(startcolorG))
startcolor.b = colorchanlimit(int(startcolorB))
colcnt += 1
if rotval==0:
return gradsurf
else:
return pygame.transform.rotate(gradsurf, rotval)
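# Minimal usage sketch (the 'screen' display surface here is assumed, not part
# of this module):
# grad = makegradient("#000000", "#ffffff", 0)
# screen.blit(pygame.transform.scale(grad, screen.get_size()), (0, 0))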
| gpl-3.0 | 7,944,259,049,090,231,000 | 26.316239 | 122 | 0.728723 | false |
timmo/gardeningPI | gists/integration.py | 1 | 20203 | import math
import requests
from requests.exceptions import *
import json
import astral
import datetime
import hashlib
from enum import Enum, unique
from kivy.clock import Clock
from kivy.logger import Logger
# Using I2C on RasPi for reading a light sensor
import platform
if 'arm' in platform.uname().machine:
import smbus # used for I2C connection to TSL2516 lux sensor
class ParsingException(Exception):
pass
@unique
class WeatherCondition(Enum):
clear = 1
cloudy = 2
drizzle = 3
rain = 4
heavy_rain = 5
hail = 6
snow = 7
heavy_snow = 8
fog = 9
wind = 10
thunderstorm = 11
tornado = 12
# TODO: load credentials from external file?
class IntegrationBase:
def __init__(self):
super().__init__()
self.refresh_data_time = 900 # every 15 minutes
Clock.schedule_interval(self.refresh, self.refresh_data_time)
def refresh(self, dt):
pass
class NetatmoIntegration(IntegrationBase):
_baseUrl = "https://api.netatmo.net/"
def __init__(self, client_id, client_secret, username, password):
super().__init__()
# TODO: load credentials from external file?
self.clientId = client_id
self.clientSecret = client_secret
self.username = username
self.password = password
self.access_token = None
self.refresh_token = None
self.refresh_access_token_time = -1
self.retry_authentication_time = 60 # every minute
self.wifiStatus = None
self.calibratingCo2 = False
self.name = "Anonymous"
self.position = astral.Location()
self.inside = {
'temperature': {
'current': "88.8"
},
'humidity': 100,
'co2': 8888.8
}
self.outside = {
'temperature': {
'min': -25.0,
'current': 38.8,
'max': 45.0
},
'battery': 100,
'connection': 100
}
self.rain = {
'rain': {
'hour': 88.8,
'day': 88.8
},
'battery': 100,
'connection': 100
}
self.alarm = []
self.locale = ''
Clock.schedule_once(self.authenticate)
def authenticate(self, dt):
Logger.debug('Netatmo: Starting authentication')
try:
params = {
"grant_type": "password",
"client_id": self.clientId,
"client_secret": self.clientSecret,
"username": self.username,
"password": self.password,
"scope": "read_station"
}
# REQUEST
response = requests.post(NetatmoIntegration._baseUrl + "oauth2/token", data=params).json()
#
# TODO: Check response
except RequestException as rex:
# Authentication failed
Logger.debug('Netatmo: Failed to authenticate')
Logger.exception(str(rex))
Clock.schedule_once(self.authenticate, self.retry_authentication_time) # TODO only for network related errors
return
self.access_token = response['access_token']
self.refresh_token = response['refresh_token']
self.refresh_access_token_time = response['expires_in']
Clock.schedule_once(self.refresh_access_token, self.refresh_access_token_time / 2)
Logger.debug('Netatmo: authentication successful')
def refresh_access_token(self, dt):
Logger.debug('Netatmo: Starting refresh of access token')
try:
# Refresh access token
params = {
"grant_type": "refresh_token",
"refresh_token": self.refresh_token,
"client_id": self.clientId,
"client_secret": self.clientSecret
}
response = requests.post(NetatmoIntegration._baseUrl + "oauth2/token", data=params).json()
# TODO: Check response
except RequestException as rex:
Logger.debug('Netatmo: Failed to refresh access token')
Logger.exception(str(rex))
Clock.schedule_once(self.authenticate,
self.retry_authentication_time) # TODO only for authentication related errors
return
self.refresh_token = response['refresh_token']
self.refresh_access_token_time = response['expires_in']
Clock.schedule_once(self.refresh_access_token, self.refresh_access_token_time / 2)
Logger.debug('Netatmo: Access token refreshed successfully')
def refresh(self, dt):
super().refresh(dt)
Logger.debug('Netatmo: Starting data refresh')
# Load data from netatmo portal
try:
# Read weather station
params = {
"access_token": self.access_token
}
response = requests.post(NetatmoIntegration._baseUrl + "api/getstationsdata", data=params)
#print(json.dumps(response.json(), sort_keys=True, indent=4, separators=(',', ': ')))
except RequestException as rex:
Logger.debug('Netatmo: Failed to refresh data')
Logger.exception(str(rex))
            # response may be unbound if the request itself failed
            if 'response' in locals():
                Logger.debug(str(response.content))
            return
# Parse Response
try:
# TODO identify errors like
# {
# "error": {
# "code": 500,
# "message": "Internal Server Error"
# }
# }
# This is the station's locale string for displaying values
self.locale = response.json()['body']['user']['administrative']['reg_locale'].replace('-', '_')
station = response.json()['body']['devices'][0]
self.name = station['station_name']
self.wifiStatus = station['wifi_status']
self.calibratingCo2 = station['co2_calibrating']
self.position.name = self.name
self.position.region = station['place']['city']
self.position.latitude = station['place']['location'][1]
self.position.longitude = station['place']['location'][0]
self.position.timezone = station['place']['timezone']
self.position.elevation = 0
Logger.debug("Netatmo: Location is {} ({}, {}); timezone: {}".format(
str(self.position.region), str(self.position.latitude), str(self.position.longitude),
str(self.position.timezone)))
# Inside module
data = station['dashboard_data']
self.inside['temperature'] = {
'current': data['Temperature'],
'min': data['min_temp'],
'max': data['max_temp'],
'trend': data['temp_trend'] if 'temp_trend' in data else 0
}
self.inside['co2'] = data['CO2']
self.inside['humidity'] = data['Humidity']
self.inside['pressure'] = {
'current': data['Pressure'],
'trend': data['pressure_trend']
}
self.inside['noise'] = data['Noise']
# TODO: find a better way of identifying the modules (sequence depends on configuration)
# outside module
data = station['modules'][1]
self.outside['battery'] = data['battery_percent']
self.outside['connection'] = data['rf_status']
data = station['modules'][1]['dashboard_data']
self.outside['temperature'] = {
'current': data['Temperature'],
'min': data['min_temp'],
'max': data['max_temp'],
'trend': data['temp_trend'] if 'temp_trend' in data else 0
}
self.outside['humidity'] = data['Humidity']
# rain module
data = station['modules'][0]
self.rain['battery'] = data['battery_percent']
self.rain['connection'] = data['rf_status']
data = station['modules'][0]['dashboard_data']
self.rain['rain'] = {
'hour': data['sum_rain_1'],
'day': data['sum_rain_24'] if 'sum_rain_24' in data else 0
}
# alarms
if 'meteo_alarms' in station:
for alarm in station['meteo_alarms']:
self.alarm.append({
'type': alarm['type'],
'level': alarm['level'],
'description': alarm['descr'][13:]
})
Logger.debug('Netatmo: Data refresh successful')
except (KeyError, ValueError) as err:
Logger.debug('Netatmo: Failed to parse json')
Logger.exception(str(err))
Logger.debug(str(response.content))
class OpenWeatherMapIntegration(IntegrationBase):
_baseUrl = "http://api.openweathermap.org/data/2.5/"
_iconUrl = "http://openweathermap.org/img/w/"
def __init__(self, position, app_id):
super().__init__()
self.position = position
self.appId = app_id
self.forecast = []
# Converts OWM weather id to common weather condition
def _convert_weather_id(self, weather_id):
if 200 <= weather_id <= 299:
return WeatherCondition.thunderstorm
if 300 <= weather_id <= 399:
return WeatherCondition.drizzle
# 400 range does not exist?
if 500 == weather_id:
return WeatherCondition.drizzle
if 501 == weather_id:
return WeatherCondition.rain
if 502 <= weather_id <= 599:
return WeatherCondition.heavy_rain
if 600 <= weather_id <= 601:
return WeatherCondition.snow
if 602 <= weather_id <= 699:
return WeatherCondition.heavy_snow
if 700 <= weather_id <= 780:
return WeatherCondition.fog
if weather_id == 781:
return WeatherCondition.tornado
# Clear Sky
if weather_id == 800:
return WeatherCondition.clear
# Clouds
if 801 <= weather_id <= 804:
return WeatherCondition.cloudy
if 900 <= weather_id <= 902:
return WeatherCondition.tornado
if weather_id == 905 or 957 <= weather_id <= 962:
return WeatherCondition.wind
if weather_id == 906:
return WeatherCondition.hail
return None
def refresh(self, dt):
super().refresh(dt)
Logger.debug('OWM: Starting data refresh')
Logger.debug("OWM: using location {} ({}, {}); timezone: {}".format(
str(self.position.region), str(self.position.latitude), str(self.position.longitude),
str(self.position.timezone)))
try:
# Forecast (16 days)
params = {
"lat": self.position.latitude,
"lon": self.position.longitude,
"mode": "json",
"appid": self.appId,
"units": "metric",
"lang": "de",
"cnt": 10
}
            response = requests.get(OpenWeatherMapIntegration._baseUrl + "forecast/daily", params=params)
# print(json.dumps(response.json(), indent=2))
except RequestException as rex:
Logger.debug('OWM: Failed to refresh data')
Logger.exception(str(rex))
return
# Parse response
try:
for entry in response.json()['list']:
timestamp = datetime.datetime.fromtimestamp(entry['dt'])
self.forecast.append({
'time': timestamp,
'description': entry['weather'][0]['description'],
'icon': entry['weather'][0]['icon'],
'id': self._convert_weather_id(entry['weather'][0]['id']),
'temperature': {
"min": float(format(entry['temp']['min'], '.1f')),
"max": float(format(entry['temp']['max'], '.1f')),
},
'pressure': entry['pressure'],
'humidity': entry['humidity'],
'clouds': entry['clouds'] if 'clouds' in entry else 0,
'snow': entry['snow'] if 'snow' in entry else 0,
'rain': entry['rain'] if 'rain' in entry else 0
})
except KeyError as kerr:
Logger.debug('OWM: Failed to parse json')
Logger.exception(str(kerr))
Logger.debug(str(response.content))
Logger.debug('OWM: Data refresh successful')
# Only gives min/max temperature for today and next two days
class WetterComIntegration(IntegrationBase):
_baseUrl = "http://api.wetter.com/forecast/weather/city/{}/project/{}/cs/{}"
def __init__(self, city_code, project_name, api_key):
super().__init__()
        self.minimumTemperature = -25
        self.maximumTemperature = 45
        self.id = None
self._city_code = city_code
self._project_name = project_name
self._api_key = api_key
# Converts Wetter.com id to common weather condition
def _convert_weather_id(self, weather_id):
if weather_id == 0:
return WeatherCondition.clear
if weather_id in (1, 2, 3) or 10 <= weather_id <= 39:
return WeatherCondition.cloudy
if weather_id == 4 or 40 <= weather_id <= 49:
return WeatherCondition.fog
if weather_id in (5, 50, 51, 53, 56):
return WeatherCondition.drizzle
if weather_id in (6, 8, 60, 61, 63):
return WeatherCondition.rain
if weather_id in (55, 65, 80, 81, 82):
return WeatherCondition.heavy_rain
if weather_id in (57, 66, 67, 69, 83, 84):
return WeatherCondition.hail
if weather_id in (7, 68, 70, 71, 73, 85):
return WeatherCondition.snow
if weather_id in (75, 86):
return WeatherCondition.heavy_snow
if weather_id == 9 or 90 <= weather_id <= 99:
return WeatherCondition.thunderstorm
return None
def refresh(self, dt):
super().refresh(dt)
Logger.debug('Wetter.com: Starting data refresh')
# Read current weather from wetter.com
try:
params = {
"output": 'json'
}
checksum = hashlib.md5(self._project_name.encode('utf-8') + self._api_key.encode('utf-8') +
self._city_code.encode('utf-8')).hexdigest()
response = requests.get(WetterComIntegration._baseUrl.format(self._city_code, self._project_name, checksum),
                                    params=params)
# print(json.dumps(response.json(), sort_keys=True, indent=4, separators=(',', ': ')))
data = response.json()
        except (RequestException, ValueError, ConnectionError) as ex:
Logger.debug('Wetter.com: Failed to refresh data')
Logger.exception(str(ex))
if 'response' in locals():
msg = str(response.content)
else:
msg = ""
Logger.debug(msg)
return
# Parse response
try:
now = datetime.datetime.now()
for daystring, forecast in data['city']['forecast'].items():
day = datetime.datetime.strptime(daystring, '%Y-%m-%d')
if day.date() == now.date():
self.minimumTemperature = float(forecast['tn'])
self.maximumTemperature = float(forecast['tx'])
# TODO: take values from last day for range 00:00 .. 05:59
if 6 <= now.hour <= 10:
weather_id = forecast['06:00']['w']
elif 11 <= now.hour <= 16:
weather_id = forecast['11:00']['w']
elif 17 <= now.hour <= 22:
weather_id = forecast['17:00']['w']
else:
weather_id = forecast['23:00']['w']
self.id = self._convert_weather_id(int(weather_id))
break
else:
Logger.warning('Wetter.com: Unable to find date {} in forecast'.format(now.strftime('%Y-%m-%d')))
        except (KeyError, AttributeError) as err:
Logger.warning('Wetter.com: Unable to parse json')
Logger.debug('Wetter.com: \n' +
json.dumps(response.json(), sort_keys=True, indent=4, separators=(',', ': ')))
Logger.exception(str(err))
Logger.debug('Wetter.com: Data refresh successful')
Logger.debug('Wetter.com: Got id {}'.format(self.id))
# This is the new, improved version for brightness control, using a TSL2561 via I2C
class TSL2516BrightnessRegulation(IntegrationBase):
def __init__(self):
super().__init__()
self.bus = 1
self.address = 0x39
self.ambient_light = 0
self.infrared_light = 0
self.lux = 2.0
# Weight of latest lux measurement in overall brightness calculation. Used for slowing down changes in
# brightness. A value of 1.0 completely ignores the old lux value
        self.new_lux_weight = 0.05
self.brightness = 120
self.min_brightness = 15
self.max_brightness = 255
self.device = '/sys/class/backlight/rpi_backlight/brightness'
def refresh(self, dt):
super().refresh(dt)
        # Measure brightness via TSL2561 lux sensor on I2C bus 1
# see http://www.mogalla.net/201502/lichtsensor-tsl2561-am-raspberry (german)
# Code would benefit from a block read command. smbus-cffi 0.5.1 documentation mentions that block reads
# currently crash with a kernel panic on RasPi. Thus, reading single bytes.
# TODO: Try block reads
try:
bus = smbus.SMBus(self.bus)
bus.write_byte_data(self.address, 0x80, 0x03) # init measurement
lb = bus.read_byte_data(self.address, 0x8c)
hb = bus.read_byte_data(self.address, 0x8d)
self.ambient_light = (hb << 8) | lb
lb = bus.read_byte_data(self.address, 0x8e)
hb = bus.read_byte_data(self.address, 0x8f)
self.infrared_light = (hb << 8) + lb
except IOError as ex:
Logger.warning("Brightness: Problems using I2C bus ({}) ".format(str(ex)))
# TODO: some countermeasure? bus reset?
return
# Calculate Lux value (see example in TSL2561 datasheet)
if self.ambient_light == 0:
return # ratio would result in div by 0, avoid
ratio = self.infrared_light / float(self.ambient_light)
if 0 < ratio <= 0.50:
new_lux = 0.0304 * self.ambient_light - 0.062 * self.ambient_light * (ratio ** 1.4)
elif 0.50 < ratio <= 0.61:
new_lux = 0.0224 * self.ambient_light - 0.031 * self.infrared_light
elif 0.61 < ratio <= 0.80:
new_lux = 0.0128 * self.ambient_light - 0.0153 * self.infrared_light
elif 0.80 < ratio <= 1.3:
new_lux = 0.00146 * self.ambient_light - 0.00112 * self.infrared_light
else:
new_lux = 0
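        # Worked example of the piecewise formula above (illustrative numbers, not a
        # real measurement): ambient_light=1000, infrared_light=300 gives ratio 0.3,
        # so new_lux = 0.0304*1000 - 0.062*1000*(0.3**1.4) ~= 30.4 - 11.5 ~= 18.9 lux.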
# Weighted average of old and current value
self.lux = (1.0 - self.new_lux_weight) * self.lux + self.new_lux_weight * new_lux
# Use a logarithmic function to map lux to brightness, clamp to min..max
new_brightness = max(self.min_brightness, min(round(math.log10(self.lux+1.5)*300.0), self.max_brightness))
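        # Illustrative values for the mapping above: lux=0 yields round(log10(1.5)*300) = 53,
        # and roughly 5.6 lux or more already saturates at max_brightness (255); the
        # min_brightness clamp only takes effect if it is set above 53.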
# Write to device
if self.brightness != new_brightness:
            Logger.debug('Brightness: Setting to {} ({} lux) - current {} lux'.format(
str(new_brightness), "%.2f" % self.lux, "%.2f" % new_lux))
self.brightness = new_brightness
with open(self.device, 'w') as d:
d.write(str(self.brightness))
| apache-2.0 | -8,395,176,535,836,273,000 | 37.629063 | 121 | 0.542395 | false |
KurtHaffner/Weather_Project | test.py | 1 | 3352 | from tkinter import *
from pygeocoder import Geocoder
import forecastio
def getForecast(latt, lngg, results):
#Set up the box that will hold all the results.
result = Tk()
result.title("Weather Results")
api_key = "d747bd6c3aa83c90ecc20dbbb019d5ea"
lat = latt
lng = lngg
forecast = forecastio.load_forecast(api_key, lat, lng)
#Get the daily forecast and print a nice summary.
byday = forecast.daily()
#Add labels with the location and summary.
something = "The weather for {0}, {1}.".format(results[0].city,results[0].state)
Label(result, text=something).grid(row=0)
Label(result, text="").grid(row=1)
Label(result, text=byday.summary).grid(row=2)
#Get the current data in a datapoint.
current = forecast.currently()
#Set up variables for all the data needed.
temp = current.temperature
feelsTemp = current.apparentTemperature
humidity = current.humidity
prob = current.precipProbability * 100
dis = current.nearestStormDistance
intensity = current.precipIntensity
#Print the temperature and feels like temp with humidity.
something = "The current temperature is {0} degrees fahrenheit.".format(temp)
something1 = "The temperature feels like {0} degrees fahrenheit.".format(feelsTemp)
something2 = "The current humidity is {0}%.".format(humidity*100)
#Add labels for temperature and feels like temp with humidity.
Label(result, text="").grid(row=3)
Label(result, text=something).grid(row=4)
Label(result, text=something1).grid(row=5)
Label(result, text=something2).grid(row=6)
#Print the hourly summary.
byHour = forecast.hourly()
#Add hourly summary.
Label(result, text="").grid(row=7)
Label(result, text=byHour.summary).grid(row=8)
#Print the storm and rain/snow information.
something = "The probablity of precipitation right now is {0}%".format(prob)
something1 = "The nearest storm is {0} miles away.".format(dis)
#Add the storm and rain/snow info.
Label(result, text="").grid(row=9)
Label(result, text=something).grid(row=10)
Label(result, text=something1).grid(row=11)
#Check to see if the probability is high enough to print storm info.
if prob >= 50.0:
typePrec = current.precipType
something = "The precipitation intensity is {0} inches an hour.".format(intensity)
something1 = "The type of precipitation is {0}.".format(typePrec)
#Add to the window.
Label(result, text="").grid(row=12)
Label(result, text=something).grid(row=13)
Label(result, text=something1).grid(row=14)
return
def do_stuff():
#Put the input into a geocoder object.
results = Geocoder.geocode(e1.get())
#Call the getForecast function with lat and long.
getForecast(results[0].latitude,results[0].longitude, results)
#End the GUI.
master.destroy()
#Set up the prompt for finding the lat and long.
master = Tk()
master.title("Weather Widget")
Label(master, text="Please enter an address, city or zip code").grid(row=0)
e1 = Entry(master)
e1.grid(row=1, column=0)
Button(master, text="Get Weather", command=do_stuff).grid(row=2, column=0, sticky=W, pady=4)
Button(master, text="Quit", command=master.destroy).grid(row=2, column=1, sticky=W, pady=4)
mainloop()
| apache-2.0 | 5,965,499,589,496,558,000 | 30.92381 | 92 | 0.686158 | false |
shankari/folium | folium/map.py | 1 | 29178 | # -*- coding: utf-8 -*-
"""
Map
------
Classes for drawing maps.
"""
from __future__ import unicode_literals
import json
from collections import OrderedDict
from jinja2 import Environment, PackageLoader, Template
from branca.six import text_type, binary_type
from branca.utilities import _parse_size
from branca.element import (Element, Figure, MacroElement, Html,
JavascriptLink, CssLink)
ENV = Environment(loader=PackageLoader('folium', 'templates'))
_default_js = [
('leaflet',
'https://unpkg.com/[email protected]/dist/leaflet.js'),
('jquery',
'https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js'),
('bootstrap',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js'),
('awesome_markers',
'https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.js'), # noqa
('marker_cluster_src',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/leaflet.markercluster-src.js'), # noqa
('marker_cluster',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/leaflet.markercluster.js'), # noqa
]
_default_css = [
('leaflet_css',
'https://unpkg.com/[email protected]/dist/leaflet.css'),
('bootstrap_css',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css'),
('bootstrap_theme_css',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css'), # noqa
('awesome_markers_font_css',
'https://maxcdn.bootstrapcdn.com/font-awesome/4.6.3/css/font-awesome.min.css'), # noqa
('awesome_markers_css',
'https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.css'), # noqa
('marker_cluster_default_css',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/MarkerCluster.Default.css'), # noqa
('marker_cluster_css',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/MarkerCluster.css'), # noqa
('awesome_rotate_css',
'https://rawgit.com/python-visualization/folium/master/folium/templates/leaflet.awesome.rotate.css'), # noqa
]
class LegacyMap(MacroElement):
"""Create a Map with Folium and Leaflet.js
Generate a base map of given width and height with either default
tilesets or a custom tileset URL. The following tilesets are built-in
to Folium. Pass any of the following to the "tiles" keyword:
- "OpenStreetMap"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Map (Northing, Easting).
width: pixel int or percentage string (default: '100%')
Width of the map.
height: pixel int or percentage string (default: '100%')
Height of the map.
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can choose from a list of built-in tiles,
pass a custom URL or pass `None` to create a map without tiles.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
max_zoom: int, default 18
Maximum zoom depth for the map.
zoom_start: int, default 10
Initial zoom level for the map.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
detect_retina: bool, default False
If true and user is on a retina display, it will request four
tiles of half the specified size and a bigger zoom level in place
of one to utilize the high resolution.
crs : str, default 'EPSG3857'
Defines coordinate reference systems for projecting geographical points
into pixel (screen) coordinates and back.
You can use Leaflet's values :
* EPSG3857 : The most common CRS for online maps, used by almost all
free and commercial tile providers. Uses Spherical Mercator projection.
Set in by default in Map's crs option.
* EPSG4326 : A common CRS among GIS enthusiasts.
Uses simple Equirectangular projection.
* EPSG3395 : Rarely used by some commercial tile providers.
Uses Elliptical Mercator projection.
* Simple : A simple CRS that maps longitude and latitude into
x and y directly. May be used for maps of flat surfaces
(e.g. game maps). Note that the y axis should still be inverted
(going from bottom to top).
control_scale : bool, default False
Whether to add a control scale on the map.
prefer_canvas : bool, default False
Forces Leaflet to use the Canvas back-end (if available) for
vector layers instead of SVG. This can increase performance
considerably in some cases (e.g. many thousands of circle
markers on the map).
no_touch : bool, default False
Forces Leaflet to not use touch events even if it detects them.
disable_3d : bool, default False
Forces Leaflet to not use hardware-accelerated CSS 3D
transforms for positioning (which may cause glitches in some
rare environments) even if they're supported.
Returns
-------
Folium LegacyMap Object
Examples
--------
>>> map = folium.LegacyMap(location=[45.523, -122.675],
... width=750, height=500)
>>> map = folium.LegacyMap(location=[45.523, -122.675],
... tiles='Mapbox Control Room')
>>> map = folium.LegacyMap(location=(45.523, -122.675), max_zoom=20,
... tiles='Cloudmade', API_key='YourKey')
>>> map = folium.LegacyMap(location=[45.523, -122.675], zoom_start=2,
... tiles=('http://{s}.tiles.mapbox.com/v3/'
... 'mapbox.control-room/{z}/{x}/{y}.png'),
... attr='Mapbox attribution')
"""
def __init__(self, location=None, width='100%', height='100%',
left="0%", top="0%", position='relative',
tiles='OpenStreetMap', API_key=None, max_zoom=18, min_zoom=1,
zoom_start=10, continuous_world=False, world_copy_jump=False,
no_wrap=False, attr=None, min_lat=-90, max_lat=90,
min_lon=-180, max_lon=180, max_bounds=True,
detect_retina=False, crs='EPSG3857', control_scale=False,
prefer_canvas=False, no_touch=False, disable_3d=False):
super(LegacyMap, self).__init__()
self._name = 'Map'
self._env = ENV
if not location:
# If location is not passed we center and ignore zoom.
self.location = [0, 0]
self.zoom_start = min_zoom
else:
self.location = location
self.zoom_start = zoom_start
Figure().add_child(self)
# Map Size Parameters.
self.width = _parse_size(width)
self.height = _parse_size(height)
self.left = _parse_size(left)
self.top = _parse_size(top)
self.position = position
self.min_lat = min_lat
self.max_lat = max_lat
self.min_lon = min_lon
self.max_lon = max_lon
self.max_bounds = max_bounds
self.continuous_world = continuous_world
self.no_wrap = no_wrap
self.world_copy_jump = world_copy_jump
self.crs = crs
self.control_scale = control_scale
self.global_switches = GlobalSwitches(prefer_canvas, no_touch, disable_3d)
if tiles:
self.add_tile_layer(
tiles=tiles, min_zoom=min_zoom, max_zoom=max_zoom,
continuous_world=continuous_world, no_wrap=no_wrap, attr=attr,
API_key=API_key, detect_retina=detect_retina
)
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
position : {{this.position}};
width : {{this.width[0]}}{{this.width[1]}};
height: {{this.height[0]}}{{this.height[1]}};
left: {{this.left[0]}}{{this.left[1]}};
top: {{this.top[0]}}{{this.top[1]}};
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div class="folium-map" id="{{this.get_name()}}" ></div>
{% endmacro %}
{% macro script(this, kwargs) %}
{% if this.max_bounds %}
var southWest = L.latLng({{ this.min_lat }}, {{ this.min_lon }});
var northEast = L.latLng({{ this.max_lat }}, {{ this.max_lon }});
var bounds = L.latLngBounds(southWest, northEast);
{% else %}
var bounds = null;
{% endif %}
var {{this.get_name()}} = L.map(
'{{this.get_name()}}',
{center: [{{this.location[0]}},{{this.location[1]}}],
zoom: {{this.zoom_start}},
maxBounds: bounds,
layers: [],
worldCopyJump: {{this.world_copy_jump.__str__().lower()}},
crs: L.CRS.{{this.crs}}
});
{% if this.control_scale %}L.control.scale().addTo({{this.get_name()}});{% endif %}
{% endmacro %}
""") # noqa
def _repr_html_(self, **kwargs):
"""Displays the Map in a Jupyter notebook.
"""
if self._parent is None:
self.add_to(Figure())
out = self._parent._repr_html_(**kwargs)
self._parent = None
else:
out = self._parent._repr_html_(**kwargs)
return out
def add_tile_layer(self, tiles='OpenStreetMap', name=None,
API_key=None, max_zoom=18, min_zoom=1,
continuous_world=False, attr=None, active=False,
detect_retina=False, no_wrap=False, **kwargs):
"""
Add a tile layer to the map. See TileLayer for options.
"""
tile_layer = TileLayer(tiles=tiles, name=name,
min_zoom=min_zoom, max_zoom=max_zoom,
attr=attr, API_key=API_key,
detect_retina=detect_retina,
continuous_world=continuous_world,
no_wrap=no_wrap)
self.add_child(tile_layer, name=tile_layer.tile_name)
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
# Set global switches
figure.header.add_child(self.global_switches, name='global_switches')
# Import Javascripts
for name, url in _default_js:
figure.header.add_child(JavascriptLink(url), name=name)
# Import Css
for name, url in _default_css:
figure.header.add_child(CssLink(url), name=name)
figure.header.add_child(Element(
'<style>html, body {'
'width: 100%;'
'height: 100%;'
'margin: 0;'
'padding: 0;'
'}'
'</style>'), name='css_style')
figure.header.add_child(Element(
'<style>#map {'
'position:absolute;'
'top:0;'
'bottom:0;'
'right:0;'
'left:0;'
'}'
'</style>'), name='map_style')
super(LegacyMap, self).render(**kwargs)
class GlobalSwitches(Element):
def __init__(self, prefer_canvas=False, no_touch=False, disable_3d=False):
super(GlobalSwitches, self).__init__()
self._name = 'GlobalSwitches'
self.prefer_canvas = prefer_canvas
self.no_touch = no_touch
self.disable_3d = disable_3d
self._template = Template(
'<script>'
'L_PREFER_CANVAS = {% if this.prefer_canvas %}true{% else %}false{% endif %}; '
'L_NO_TOUCH = {% if this.no_touch %}true{% else %}false{% endif %}; '
'L_DISABLE_3D = {% if this.disable_3d %}true{% else %}false{% endif %};'
'</script>'
)
class Layer(MacroElement):
"""An abstract class for everything that is a Layer on the map.
It will be used to define whether an object will be included in
LayerControls.
Parameters
----------
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
"""
def __init__(self, name=None, overlay=False, control=True):
super(Layer, self).__init__()
self.layer_name = name if name is not None else self.get_name()
self.overlay = overlay
self.control = control
class TileLayer(Layer):
"""Create a tile layer to append on a Map.
Parameters
----------
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can choose from this list of built-in tiles:
- "OpenStreetMap"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
min_zoom: int, default 1
Minimal zoom for which the layer will be displayed.
max_zoom: int, default 18
Maximal zoom for which the layer will be displayed.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
detect_retina: bool, default False
If true and user is on a retina display, it will request four
tiles of half the specified size and a bigger zoom level in place
of one to utilize the high resolution.
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
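    Examples
    --------
    (Illustrative; assumes an existing Map instance named m.)
    >>> TileLayer(tiles='Stamen Toner').add_to(m)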
"""
def __init__(self, tiles='OpenStreetMap', min_zoom=1, max_zoom=18,
attr=None, API_key=None, detect_retina=False,
continuous_world=False, name=None, overlay=False,
control=True, no_wrap=False):
self.tile_name = (name if name is not None else
''.join(tiles.lower().strip().split()))
super(TileLayer, self).__init__(name=self.tile_name, overlay=overlay,
control=control)
self._name = 'TileLayer'
self._env = ENV
self.min_zoom = min_zoom
self.max_zoom = max_zoom
self.no_wrap = no_wrap
self.continuous_world = continuous_world
self.detect_retina = detect_retina
self.tiles = ''.join(tiles.lower().strip().split())
if self.tiles in ('cloudmade', 'mapbox') and not API_key:
raise ValueError('You must pass an API key if using Cloudmade'
' or non-default Mapbox tiles.')
templates = list(self._env.list_templates(
filter_func=lambda x: x.startswith('tiles/')))
tile_template = 'tiles/'+self.tiles+'/tiles.txt'
attr_template = 'tiles/'+self.tiles+'/attr.txt'
if tile_template in templates and attr_template in templates:
self.tiles = self._env.get_template(tile_template).render(API_key=API_key) # noqa
self.attr = self._env.get_template(attr_template).render()
else:
self.tiles = tiles
if not attr:
raise ValueError('Custom tiles must'
' also be passed an attribution.')
if isinstance(attr, binary_type):
attr = text_type(attr, 'utf8')
self.attr = attr
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{
maxZoom: {{this.max_zoom}},
minZoom: {{this.min_zoom}},
continuousWorld: {{this.continuous_world.__str__().lower()}},
noWrap: {{this.no_wrap.__str__().lower()}},
attribution: '{{this.attr}}',
detectRetina: {{this.detect_retina.__str__().lower()}}
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""") # noqa
class FeatureGroup(Layer):
"""
Create a FeatureGroup layer ; you can put things in it and handle them
as a single layer. For example, you can add a LayerControl to
tick/untick the whole group.
Parameters
----------
name : str, default None
The name of the featureGroup layer.
It will be displayed in the LayerControl.
If None get_name() will be called to get the technical (ugly) name.
overlay : bool, default True
Whether your layer will be an overlay (ticked with a check box in
LayerControls) or a base layer (ticked with a radio button).
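    Examples
    --------
    (Illustrative; assumes an existing Map instance named m.)
    >>> fg = FeatureGroup(name='my group')
    >>> Marker(location=[45.5, -122.3], popup='Portland, OR').add_to(fg)
    >>> fg.add_to(m)
    >>> LayerControl().add_to(m)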
"""
def __init__(self, name=None, overlay=True, control=True):
super(FeatureGroup, self).__init__(overlay=overlay, control=control, name=name) # noqa
self._name = 'FeatureGroup'
self.tile_name = name if name is not None else self.get_name()
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.featureGroup(
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class LayerControl(MacroElement):
"""
Creates a LayerControl object to be added on a folium map.
Parameters
----------
position : str
The position of the control (one of the map corners), can be
'topleft', 'topright', 'bottomleft' or 'bottomright'
default: 'topright'
collapsed : boolean
If true the control will be collapsed into an icon and expanded on
mouse hover or touch.
default: True
autoZIndex : boolean
If true the control assigns zIndexes in increasing order to all of
its layers so that the order is preserved when switching them on/off.
default: True
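    Examples
    --------
    (Illustrative; assumes an existing Map instance named m with layers added.)
    >>> LayerControl(collapsed=False).add_to(m)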
"""
def __init__(self, position='topright', collapsed=True, autoZIndex=True):
super(LayerControl, self).__init__()
self._name = 'LayerControl'
self.position = position
self.collapsed = str(collapsed).lower()
self.autoZIndex = str(autoZIndex).lower()
self.base_layers = OrderedDict()
self.overlays = OrderedDict()
self._template = Template("""
{% macro script(this,kwargs) %}
var {{this.get_name()}} = {
base_layers : { {% for key,val in this.base_layers.items() %}"{{key}}" : {{val}},{% endfor %} },
overlays : { {% for key,val in this.overlays.items() %}"{{key}}" : {{val}},{% endfor %} }
};
L.control.layers(
{{this.get_name()}}.base_layers,
{{this.get_name()}}.overlays,
{position: '{{this.position}}',
collapsed: {{this.collapsed}},
autoZIndex: {{this.autoZIndex}}
}).addTo({{this._parent.get_name()}});
{% endmacro %}
""") # noqa
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
# We select all Layers for which (control and not overlay).
self.base_layers = OrderedDict(
[(val.layer_name, val.get_name()) for key, val in
self._parent._children.items() if isinstance(val, Layer) and
(not hasattr(val, 'overlay') or not val.overlay) and
(not hasattr(val, 'control') or val.control)])
# We select all Layers for which (control and overlay).
self.overlays = OrderedDict(
[(val.layer_name, val.get_name()) for key, val in
self._parent._children.items() if isinstance(val, Layer) and
(hasattr(val, 'overlay') and val.overlay) and
(not hasattr(val, 'control') or val.control)])
super(LayerControl, self).render()
class Icon(MacroElement):
"""
Creates an Icon object that will be rendered
using Leaflet.awesome-markers.
Parameters
----------
color : str, default 'blue'
The color of the marker. You can use:
['red', 'blue', 'green', 'purple', 'orange', 'darkred',
'lightred', 'beige', 'darkblue', 'darkgreen', 'cadetblue',
'darkpurple', 'white', 'pink', 'lightblue', 'lightgreen',
'gray', 'black', 'lightgray']
icon_color : str, default 'white'
The color of the drawing on the marker. You can use colors above,
or an html color code.
icon : str, default 'info-sign'
The name of the marker sign.
See Font-Awesome website to choose yours.
Warning : depending on the icon you choose you may need to adapt
the `prefix` as well.
angle : int, default 0
The icon will be rotated by this amount of degrees.
prefix : str, default 'glyphicon'
The prefix states the source of the icon. 'fa' for font-awesome or
'glyphicon' for bootstrap 3.
For more details see:
https://github.com/lvoogdt/Leaflet.awesome-markers
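    Examples
    --------
    (Illustrative; assumes an existing Map instance named m.)
    >>> Marker(location=[45.5, -122.3], icon=Icon(color='green', icon='cloud')).add_to(m)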
"""
def __init__(self, color='blue', icon_color='white', icon='info-sign',
angle=0, prefix='glyphicon'):
super(Icon, self).__init__()
self._name = 'Icon'
self.color = color
self.icon = icon
self.icon_color = icon_color
self.angle = angle
self.prefix = prefix
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.AwesomeMarkers.icon({
icon: '{{this.icon}}',
iconColor: '{{this.icon_color}}',
markerColor: '{{this.color}}',
prefix: '{{this.prefix}}',
extraClasses: 'fa-rotate-{{this.angle}}'
});
{{this._parent.get_name()}}.setIcon({{this.get_name()}});
{% endmacro %}
""")
class Marker(MacroElement):
"""Create a simple stock Leaflet marker on the map, with optional
popup text or Vincent visualization.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
popup: string or folium.Popup, default None
Input text or visualization for object.
icon: Icon plugin
the Icon plugin to use to render the marker.
Returns
-------
Marker names and HTML in obj.template_vars
Examples
--------
>>> Marker(location=[45.5, -122.3], popup='Portland, OR')
>>> Marker(location=[45.5, -122.3], popup=folium.Popup('Portland, OR'))
"""
def __init__(self, location, popup=None, icon=None):
super(Marker, self).__init__()
self._name = 'Marker'
self.location = location
if icon is not None:
self.add_child(icon)
if isinstance(popup, text_type) or isinstance(popup, binary_type):
self.add_child(Popup(popup))
elif popup is not None:
self.add_child(popup)
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.marker(
[{{this.location[0]}},{{this.location[1]}}],
{
icon: new L.Icon.Default()
}
)
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
def _get_self_bounds(self):
"""Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]
"""
return [[self.location[0], self.location[1]],
[self.location[0], self.location[1]]]
class Popup(Element):
"""Create a Popup instance that can be linked to a Layer.
Parameters
----------
html: string or Element
Content of the Popup.
max_width: int, default 300
The maximal width of the popup.
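    Examples
    --------
    (Illustrative; assumes an existing Map instance named m.)
    >>> Marker(location=[45.5, -122.3], popup=Popup('Portland, OR', max_width=150)).add_to(m)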
"""
def __init__(self, html=None, max_width=300):
super(Popup, self).__init__()
self._name = 'Popup'
self.header = Element()
self.html = Element()
self.script = Element()
self.header._parent = self
self.html._parent = self
self.script._parent = self
if isinstance(html, Element):
self.html.add_child(html)
elif isinstance(html, text_type) or isinstance(html, binary_type):
self.html.add_child(Html(text_type(html)))
self.max_width = max_width
self._template = Template(u"""
var {{this.get_name()}} = L.popup({maxWidth: '{{this.max_width}}'});
{% for name, element in this.html._children.items() %}
var {{name}} = $('{{element.render(**kwargs).replace('\\n',' ')}}')[0];
{{this.get_name()}}.setContent({{name}});
{% endfor %}
{{this._parent.get_name()}}.bindPopup({{this.get_name()}});
{% for name, element in this.script._children.items() %}
{{element.render()}}
{% endfor %}
""") # noqa
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
for name, child in self._children.items():
child.render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.script.add_child(Element(
self._template.render(this=self, kwargs=kwargs)),
name=self.get_name())
class FitBounds(MacroElement):
"""Fit the map to contain a bounding box with the
maximum zoom level possible.
Parameters
----------
bounds: list of (latitude, longitude) points
Bounding box specified as two points [southwest, northeast]
padding_top_left: (x, y) point, default None
Padding in the top left corner. Useful if some elements in
the corner, such as controls, might obscure objects you're zooming
to.
padding_bottom_right: (x, y) point, default None
Padding in the bottom right corner.
padding: (x, y) point, default None
Equivalent to setting both top left and bottom right padding to
the same value.
max_zoom: int, default None
Maximum zoom to be used.
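    Examples
    --------
    (Illustrative; assumes an existing Map instance named m.)
    >>> FitBounds([[45.0, -123.0], [46.0, -122.0]]).add_to(m)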
"""
def __init__(self, bounds, padding_top_left=None,
padding_bottom_right=None, padding=None, max_zoom=None):
super(FitBounds, self).__init__()
self._name = 'FitBounds'
self.bounds = json.loads(json.dumps(bounds))
options = {
'maxZoom': max_zoom,
'paddingTopLeft': padding_top_left,
'paddingBottomRight': padding_bottom_right,
'padding': padding,
}
self.fit_bounds_options = json.dumps({key: val for key, val in
options.items() if val},
sort_keys=True)
self._template = Template(u"""
{% macro script(this, kwargs) %}
{% if this.autobounds %}
var autobounds = L.featureGroup({{ this.features }}).getBounds()
{% endif %}
{{this._parent.get_name()}}.fitBounds(
{% if this.bounds %}{{ this.bounds }}{% else %}"autobounds"{% endif %},
{{ this.fit_bounds_options }}
);
{% endmacro %}
""") # noqa
| mit | -2,931,999,732,305,539,600 | 38.42973 | 114 | 0.561587 | false |
Pica4x6/numina | numina/core/dataholders.py | 1 | 3419 | #
# Copyright 2008-2014 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# Numina is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Numina is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Numina. If not, see <http://www.gnu.org/licenses/>.
#
"""
Recipe requirements
"""
import inspect
from .types import NullType, PlainPythonType
from .types import ListOfType
class EntryHolder(object):
def __init__(self, tipo, description, destination, optional,
default, choices=None, validation=True):
super(EntryHolder, self).__init__()
if tipo is None:
self.type = NullType()
elif tipo in [bool, str, int, float, complex, list]:
self.type = PlainPythonType(ref=tipo())
elif isinstance(tipo, ListOfType):
self.type = tipo
elif inspect.isclass(tipo):
self.type = tipo()
else:
self.type = tipo
self.description = description
self.optional = optional
self.dest = destination
self.default = default
self.choices = choices
self.validation = validation
def __get__(self, instance, owner):
"""Getter of the descriptor protocol."""
if instance is None:
return self
else:
if self.dest not in instance._numina_desc_val:
instance._numina_desc_val[self.dest] = self.default_value()
return instance._numina_desc_val[self.dest]
def __set__(self, instance, value):
"""Setter of the descriptor protocol."""
cval = self.convert(value)
if self.choices and (cval not in self.choices):
raise ValueError('{} not in {}'.format(cval, self.choices))
instance._numina_desc_val[self.dest] = cval
def convert(self, val):
return self.type.convert(val)
def validate(self, val):
if self.validation:
return self.type.validate(val)
return True
def default_value(self):
if self.default is not None:
return self.convert(self.default)
if self.type.default is not None:
return self.type.default
if self.optional:
return None
else:
fmt = 'Required {0!r} of type {1!r} is not defined'
msg = fmt.format(self.dest, self.type)
raise ValueError(msg)
class Product(EntryHolder):
'''Product holder for RecipeResult.'''
def __init__(self, ptype, description='', validation=True,
dest=None, optional=False, default=None, *args, **kwds):
super(Product, self).__init__(
ptype, description, dest, optional,
default, choices=None, validation=validation
)
# if not isinstance(self.type, DataProductType):
# raise TypeError('type must be of class DataProduct')
def __repr__(self):
return 'Product(type=%r, dest=%r)' % (self.type, self.dest)
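# Illustrative sketch, not part of the original module: Product is meant to be used
# as a class-level descriptor. The owning class must provide a _numina_desc_val dict
# on its instances, as numina's RecipeResult machinery does; the class and field
# names below are hypothetical.
#     class SomeResult(RecipeResult):
#         frame = Product(int, description='a reduced value')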
| gpl-3.0 | -1,352,558,844,343,390,200 | 31.561905 | 75 | 0.622112 | false |
abeing/droog | droog/message.py | 1 | 2589 | # Droog
# Copyright (C) 2015 Adam Miezianko
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Droog - Message
The message module defines the Message class which is a queue of messages to
display in the user interface.
"""
import Queue
import logging
import english
LOG = logging.getLogger(__name__)
class Messages(object):
"""The Messages class allows various components to add messages to be
displayed in the user interface. The user interface can then filter and
format the messages."""
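    # Illustrative usage sketch (not part of the original class): the turn argument
    # is optional; add() cleans the text via english.make_sentence by default.
    #     messages = Messages()
    #     messages.add("a zombie lurches toward you")
    #     latest = messages.get()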
def __init__(self, turn=None, history_size=100):
self._queue = Queue.Queue()
self.history = []
self._history_size = history_size
self._turn = turn
def add(self, message, clean=True):
"""Add a message to the message queue.
clean -- If true, process it for proper English form
"""
if clean:
message = english.make_sentence(message)
        if len(message) != 0:
LOG.info("Adding '%s' to the message queue.", message)
time = self._turn.current_time() if self._turn else ""
self._queue.put((message, time))
else:
LOG.warning("Zero-length message not added to the message queue.")
def empty(self):
"""True if the message queue is empty."""
return self._queue.empty()
def get(self):
"""Return the next message in the queue."""
message, time = self._queue.get()
if self._history_size > 0 and len(self.history) >= self._history_size:
self.history.pop(0)
self.history.append((message, time))
return message
def get_history(self, index, time=True):
"""Get an (optionally time-stamped) message from the history."""
if index > len(self.history):
return ""
text, time = self.history[index]
if time:
return "%s %s" % (time, text)
return text
| gpl-2.0 | -1,923,911,494,254,833,700 | 33.52 | 78 | 0.649672 | false |
ConsumerAffairs/django-document-similarity | docsim/documents/migrations/0002_auto__add_cluster.py | 1 | 2213 | # -*- coding: utf-8 -*-
# flake8: noqa
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Cluster'
db.create_table('documents_cluster', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('parameters', self.gf('django.db.models.fields.TextField')(default={})),
))
db.send_create_signal('documents', ['Cluster'])
# Adding M2M table for field documents on 'Cluster'
db.create_table('documents_cluster_documents', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('cluster', models.ForeignKey(orm['documents.cluster'], null=False)),
('document', models.ForeignKey(orm['documents.document'], null=False))
))
db.create_unique('documents_cluster_documents', ['cluster_id', 'document_id'])
def backwards(self, orm):
# Deleting model 'Cluster'
db.delete_table('documents_cluster')
# Removing M2M table for field documents on 'Cluster'
db.delete_table('documents_cluster_documents')
models = {
'documents.cluster': {
'Meta': {'object_name': 'Cluster'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['documents.Document']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '{}'})
},
'documents.document': {
'Meta': {'object_name': 'Document'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['documents']
| agpl-3.0 | 4,228,240,815,063,617,500 | 41.557692 | 175 | 0.591505 | false |
astrofrog/python-montage | montage/commands.py | 1 | 80203 | import subprocess
import status
import shlex
from commands_extra import *
def mAdd(images_table, template_header, out_image, img_dir=None,
no_area=False, type=None, exact=False, debug_level=None,
status_file=None, mpi=False, n_proc=8):
'''
Coadd the reprojected images in an input list to form an output mosaic
with FITS header keywords specified in a header file. Creates two output
files, one containing the coadded pixel values, and the other containing
coadded pixel area values. The pixel area values can be used as a
weighting function if the output pixel values are themselves to be coadded
with other projected images, and may also be used in validating the
fidelity of the output pixel values.
Required Arguments:
*images_table* [ value ]
ASCII table (generated by mImgtbl) containing metadata for all
images to be coadded.
*template_header* [ value ]
FITS header template to be used in generation of output FITS
*out_image* [ value ]
Name of output FITS image.
Optional Arguments:
*img_dir* [ value ]
Specifies path to directory containing reprojected images. If the
img_dir option is not included, mAdd will look for the input
images in the current working directory.
*no_area* [ True | False ]
Co-addition ignores weighting by pixel areas and performs
coaddition based only on pixel postions. Will not output an area
image for the output image.
*type* [ value ]
Select type of averaging to be done on accumulated pixel values
(either mean or median). To generate a map showing counts of how
        many times each pixel was overlapped by the input images, use
count.
*exact* [ True | False ]
Enables exact size mode. The output image will match the header
template exactly, instead of shrinking the output to fit the data.
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
mAdd output and errors will be written to status_file instead of
stdout.
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
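    Example (illustrative sketch; the file names are hypothetical):
        mAdd("pimages.tbl", "mosaic.hdr", "mosaic.fits", img_dir="projected")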
'''
if mpi:
command = "mpirun -n %i mAddMPI" % n_proc
else:
command = "mAdd"
if img_dir:
command += " -p %s" % str(img_dir)
if no_area:
command += " -n"
if type:
command += " -a %s" % str(type)
if exact:
command += " -e"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(template_header)
command += " " + str(out_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mAdd", p.stdout.read().strip())
def mAddExec(images_table, template_header, tile_dir, out_image, img_dir=None,
no_area=False, type=None, exact=False, debug_level=None,
status_file=None, mpi=False, n_proc=8):
'''
Builds a series of outputs (which together make up a tiled output) through
multiple executions of the mAdd modules.
Required Arguments:
*images_table* [ value ]
ASCII table (generated by mImgtbl) containing metadata for all
images to be coadded.
*template_header* [ value ]
FITS header template to be used in generation of output FITS
*tile_dir* [ value ]
Directory to contain output tile images and header templates
*out_image* [ value ]
Prefix for output tile images
Optional Arguments:
*img_dir* [ value ]
Specifies path to directory containing reprojected images. If the
img_dir option is not included, mAdd will look for the input
images in the current working directory.
*no_area* [ True | False ]
Co-addition ignores weighting by pixel areas and performs
coaddition based only on pixel postions. Will not output an area
image for the output image.
*type* [ value ]
Select type of averaging to be done on accumulated pixel values
(either mean or median).
*exact* [ True | False ]
Enables exact size mode. The output image will match the header
template exactly, instead of shrinking the output to fit the data.
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
mAdd output and errors will be written to status_file instead of
stdout.
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mAddExecMPI" % n_proc
else:
command = "mAddExec"
if img_dir:
command += " -p %s" % str(img_dir)
if no_area:
command += " -n"
if type:
command += " -a %s" % str(type)
if exact:
command += " -e"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(template_header)
command += " " + str(tile_dir)
command += " " + str(out_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mAddExec", p.stdout.read().strip())
def mArchiveExec(region_table, debug_level=None):
'''
Given a table of archive images (generated by mArchiveList), calls
mArchiveGet on each one in sequence to retrieve all the files into the
current directory.
Required Arguments:
*region_table* [ value ]
Table of archive images, generated by mArchiveList.
Optional Arguments:
*debug_level* [ value ]
Prints out additional debugging information; in this version, the
only supported level is 1.
'''
command = "mArchiveExec"
if debug_level:
command += " -d %s" % str(debug_level)
command += " " + str(region_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mArchiveExec", p.stdout.read().strip())
def mArchiveGet(remote_ref, local_file, debug=False, raw=False):
'''
Retrieve a single FITS image from a remote archive, using a basic URL GET
but with a structured output.
Required Arguments:
*remote_ref* [ value ]
URL of remote FITS file to retrieve (should be in quotes). See
mArchiveList for more information.
*local_file* [ value ]
Full path/filename of the retrieved file.
Optional Arguments:
*debug* [ True | False ]
Print additional debugging information.
*raw* [ True | False ]
"Raw" mode - use a raw HTTP GET (no "HTTP/1.1" etc in the header);
necessary for communication with some servers.
'''
command = "mArchiveGet"
if debug:
command += " -d"
if raw:
command += " -r"
command += ' "' + str(remote_ref) + '"'
command += " " + str(local_file)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mArchiveGet", p.stdout.read().strip())
def mArchiveList(survey, band, object_or_location, width, height, out_file):
'''
Given a location on the sky, archive name, and size in degrees, contact
the IRSA server to retrieve a list of archive images. The list contains
enough information to support mArchiveGet downloads.
Required Arguments:
*survey* [ value ]
Can be one of: 2MASS DSS SDSS DPOSS
*band* [ value ]
Case insensitive - can be one of: (2MASS) j, h, k (SDSS) u, g, r,
i, z (DPOSS) f, j, n (DSS) DSS1, DSS1R, DSS1B, DSS2, DSS2B, DSS2R,
DSS2IR
*object_or_location* [ value ]
Object name or coordinate string to be resolved by NED (if string
includes spaces, must be surrounded by double quotes)
*width* [ value ]
Width of area of interest, in degrees
*height* [ value ]
Height of area of interest, in degrees
*out_file* [ value ]
Path to output table
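    Example (illustrative; the output table name is hypothetical). The resulting
    table can then be passed to mArchiveExec to download the images:
        mArchiveList("2MASS", "K", "M31", 0.5, 0.5, "m31_2mass_k.tbl")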
'''
command = "mArchiveList"
command += " " + str(survey)
command += " " + str(band)
command += ' "' + str(object_or_location) + '"'
command += " " + str(width)
command += " " + str(height)
command += " " + str(out_file)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mArchiveList", p.stdout.read().strip())
def mBackground(in_image, out_image, A, B, C, debug_level=None, no_area=False,
status_file=None):
'''
Remove a background plane from a FITS image. The background correction
applied to the image is specified as Ax+By+C, where (x,y) is the pixel
coordinate using the image center as the origin, and (A,B,C) are the
background plane parameters specified as linear coefficients. To run in
'table' mode, see mBackground_tab.
Required Arguments:
*in_image* [ value ]
Input FITS file
*out_image* [ value ]
Output FITS file
*A, B, C* [ value ]
Corrections (as given by mFitplane or mFitExec)
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level.
*no_area* [ True | False ]
Indicates that no area images are present (assumes equal weighting
for each data pixel)
*status_file* [ value ]
mBackground output and errors will be written to status_file
instead of stdout.
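    Example (illustrative; the file names and plane coefficients are hypothetical):
        mBackground("in.fits", "out.fits", 1.0e-3, -2.0e-3, 0.5)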
'''
command = "mBackground"
if debug_level:
command += " -d %s" % str(debug_level)
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(A)
command += " " + str(B)
command += " " + str(C)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBackground", p.stdout.read().strip())
def mBackground_tab(in_image, out_image, images_table, corrections_table,
debug_level=None, no_area=False, status_file=None):
'''
Remove a background plane from a FITS image. The background correction
applied to the image is specified as Ax+By+C, where (x,y) is the pixel
coordinate using the image center as the origin, and (A,B,C) are the
background plane parameters specified as linear coefficients. This method
runs mBackground_tab in 'table' mode.
Required Arguments:
*in_image* [ value ]
Input FITS file
*out_image* [ value ]
Output FITS file
*images_table* [ value ]
Image metadata table to retrieve the filenames of images.
*corrections_table* [ value ]
Table of corrections (from mFitplane and mFitExec) to apply to the
corresponding image (from images_table).
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level.
*no_area* [ True | False ]
Indicates that no area images are present (assumes equal weighting
for each data pixel)
*status_file* [ value ]
mBackground_tab output and errors will be written to status_file
instead of stdout.
'''
command = "mBackground_tab"
if debug_level:
command += " -d %s" % str(debug_level)
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(images_table)
command += " " + str(corrections_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBackground_tab", p.stdout.read().strip())
def mBestImage(images_table, ra, dec, debug_level=None):
'''
Given a list of images and a position on the sky, determine which image
covers the location "best" (i.e., the one where the position is farthest
from the nearest edge).
Required Arguments:
*images_table* [ value ]
Input table of image metadata (as generated by mImgtbl).
*ra* [ value ]
RA of location of interest (in degrees)
*dec* [ value ]
Declination of location of interest (in degrees)
Optional Arguments:
*debug_level* [ value ]
Turn on debugging to the specified level (1 or 2)
'''
command = "mBestImage"
if debug_level:
command += " -d %s" % str(debug_level)
command += " " + str(images_table)
command += " " + str(ra)
command += " " + str(dec)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBestImage", p.stdout.read().strip())
def mBgExec(images_table, corrections_table, corr_dir, proj_dir=None,
status_file=None, debug=False, no_area=False, mpi=False, n_proc=8):
'''
Runs mBackground on all the images in a metadata table, using the
corrections generated by mFitExec.
Required Arguments:
*images_table* [ value ]
Image metadata table generated by mImgtbl.
*corrections_table* [ value ]
Table of corrections generated by mFitExec
*corr_dir* [ value ]
Directory where output images should be written
Optional Arguments:
*proj_dir* [ value ]
Specifies the path to the directory containing the projected
images.
*status_file* [ value ]
Writes output message to status_file instead of to stdout
*debug* [ True | False ]
Turns on debugging
*no_area* [ True | False ]
Indicates that no area images are present (assumes equal weighting
for each pixel)
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mBgExecMPI" % n_proc
else:
command = "mBgExec"
if proj_dir:
command += " -p %s" % str(proj_dir)
if status_file:
command += " -s %s" % str(status_file)
if debug:
command += " -d"
if no_area:
command += " -n"
command += " " + str(images_table)
command += " " + str(corrections_table)
command += " " + str(corr_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBgExec", p.stdout.read().strip())
def mBgModel(images_table, fits_table, corrections_table, n_iter=None,
level_only=False, debug_level=None, ref_img=None, status_file=None):
'''
mBgModel is a modelling/fitting program. It uses the image-to-image
    difference parameter table created by mFitExec to iteratively determine
a set of corrections to apply to each image in order to achieve a "best"
global fit.
Required Arguments:
*images_table* [ value ]
Image metadata table generated by mImgtbl.
*fits_table* [ value ]
Plane fitting table generated by mFitExec.
*corrections_table* [ value ]
Output table of background corrections
Optional Arguments:
*n_iter* [ value ]
Number of iterations (without option, defaults to 5000). Can be
between 1 and 32767.
*level_only* [ True | False ]
            Calculate level adjustments only (i.e., don't attempt to match the
slopes)
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*ref_img* [ value ]
Turns on additional debugging for the nth image in images_table.
*status_file* [ value ]
mBgModel output and errors are written to status_file instead of
to stdout.
'''
command = "mBgModel"
if n_iter:
command += " -i %s" % str(n_iter)
if level_only:
command += " -l"
if debug_level:
command += " -d %s" % str(debug_level)
if ref_img:
command += " -r %s" % str(ref_img)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(fits_table)
command += " " + str(corrections_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBgModel", p.stdout.read().strip())
def mCatMap(in_table, out_image, template_header, column=None, ref_mag=None,
debug_level=None, size=None):
'''
mCatMap is a point-source imaging program. The user defines a general
output FITS image, and its pixels are populated from a table of point
sources. The source fluxes (or just source counts) from the table are
added into the appropriate pixel to create an output image.
Required Arguments:
*in_table* [ value ]
Input table of source metadata.
*out_image* [ value ]
Path of output FITS file.
*template_header* [ value ]
ASCII header template defining output FITS file.
Optional Arguments:
*column* [ value ]
Name of the table column that contains flux levels. If not
specified, pixels will be populated with source counts rather than
summed flux values.
*ref_mag* [ value ]
Set a reference magnitude to use when calculating fluxes.
*debug_level* [ value ]
Turn on debugging to the specified level (1-3)
*size* [ value ]
Set a spread size for point sources (default is to use no spread).
Allowed values are 3 or 5.
'''
command = "mCatMap"
if column:
command += " -c %s" % str(column)
if ref_mag:
command += " -m %s" % str(ref_mag)
if debug_level:
command += " -d %s" % str(debug_level)
if size:
command += " -w %s" % str(size)
command += " " + str(in_table)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mCatMap", p.stdout.read().strip())
def mConvert(in_image, out_image, debug_level=None, status_file=None,
bitpix=None, min_val=None, max_val=None, blank_value=None):
'''
mConvert changes the datatype of an image. When converting to floating
point, no additional information is needed. However, when converting from
higher precision (e.g. 64-bit floating point) to lower (e.g. 16-bit
integer), scaling information is necessary. This can be given explicitly
by the user or guessed by the program.
Required Arguments:
*in_image* [ value ]
Input image filename
*out_image* [ value ]
Output image filename.
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
            mConvert output and errors are written to status_file instead of
to stdout.
*bitpix* [ value ]
            BITPIX value for the output FITS file (default is -64). Possible
values are: 8 (character or unsigned binary integer), 16 (16-bit
integer), 32 (32-bit integer), -32 (single precision floating
point), -64 (double precision floating point).
*min_val* [ value ]
Pixel data value in the input image which should be treated as a
minimum (value of 0) in the output image when converting from
floating point to integer (default for BITPIX 8: 0; BITPIX 16:
            -32767; BITPIX 32: -2147483647)
*max_val* [ value ]
Pixel data value in the input image which should be treated as a
maximum (value of 255 or 32768) in the output image when
converting from floating point to integer (Default for BITPIX 8:
255; BITPIX 16: 32768; BITPIX 32: 2147483648)
*blank_value* [ value ]
If converting down to an integer scale: value to be used in the
output image to represent blank pixels (NaN) from the input image.
Default value is min_val.
'''
command = "mConvert"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
if bitpix:
command += " -b %s" % str(bitpix)
if min_val:
command += " -min %s" % str(min_val)
if max_val:
command += " -max %s" % str(max_val)
if blank_value:
command += " -blank %s" % str(blank_value)
command += " " + str(in_image)
command += " " + str(out_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mConvert", p.stdout.read().strip())
def mDiff(in_image_1, in_image_2, out_image, template_header,
debug_level=None, no_area=False, status_file=None):
'''
mDiff calculates a simple difference between a single pair of overlapping
images. This is meant for use on reprojected images where the pixels
    already line up exactly. The list of overlapping image pairs is normally
    determined beforehand by mOverlaps, which deems a pair of images to
    overlap if any pixel around the perimeter of one image falls within the
    boundary of the other image.
Required Arguments:
*in_image_1* [ value ]
First input FITS file (Also needs area image in1_area_image, or
use the no_area option)
*in_image_2* [ value ]
            Second input FITS file. (Also needs area image in2_area_image, or
use the no_area option)
*out_image* [ value ]
Difference FITS image to be generated.
*template_header* [ value ]
FITS header template used to generate output image.
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level (1-4).
*no_area* [ True | False ]
No-area-images option. Creates difference image without requiring
pixel area FITS image
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mDiff"
if debug_level:
command += " -d %s" % str(debug_level)
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image_1)
command += " " + str(in_image_2)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mDiff", p.stdout.read().strip())
def mDiffExec(diffs_table, template_header, diff_dir, proj_dir=None,
debug=False, no_area=False, status_file=None, mpi=False, n_proc=8):
'''
Runs mDiff on all the pairs identified by mOverlaps.
Required Arguments:
*diffs_table* [ value ]
Table generated by mOverlaps for the images in proj_dir.
*template_header* [ value ]
FITS header template for output files.
*diff_dir* [ value ]
Path to output files.
Optional Arguments:
*proj_dir* [ value ]
Specifies path to the directory containing reprojected input
images.
*debug* [ True | False ]
Turns on debugging.
*no_area* [ True | False ]
No-area-images option. Creates difference image without requiring
_area FITS images
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mDiffExecMPI" % n_proc
else:
command = "mDiffExec"
if proj_dir:
command += " -p %s" % str(proj_dir)
if debug:
command += " -d"
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(diffs_table)
command += " " + str(template_header)
command += " " + str(diff_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mDiffExec", p.stdout.read().strip())
def mDiffFitExec(diffs_table, fits_table, diff_dir, debug=False,
status_file=None):
'''
Using the table of overlaps found by mOverlaps, mDiffFitExec runs both
mDiff and mFitplane for each record. The fitting parameters are written
to a file to be used by mBgModel.
Required Arguments:
*diffs_table* [ value ]
Overlap table generated by mOverlaps, the last column of which
contains the filenames of the difference images generated by
mDiffExec.
*fits_table* [ value ]
            Output table of difference parameters.
*diff_dir* [ value ]
Directory containing difference images.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging
*status_file* [ value ]
Writes output message to status_file instead of to stdout
'''
command = "mDiffFitExec"
if debug:
command += " -d"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(diffs_table)
command += " " + str(fits_table)
command += " " + str(diff_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mDiffFitExec", p.stdout.read().strip())
def mExec(survey, band, raw_dir=None, n_tile_x=None, n_tile_y=None,
level_only=False, keep=False, corners=False, output_image=None,
debug_level=None, region_header=None, header=None,
workspace_dir=None):
'''
The mExec module is a mosaicking executive for 2MASS, SDSS, and DSS data.
It includes remote data and metadata access. Alternatively, users can
mosaic a set of data already on disk.
Required Arguments:
*survey, band* [ value ]
If not mosaicking user-provided data (raw_dir option), must select
one of the following combinations of survey and band: 2MASS [j, h,
k] SDSS [u, g, r, i, z] DSS [DSS1, DSS1R, DSS1B, DSS2, DSS2B,
DSS2R, DSS2IR]
Optional Arguments:
*raw_dir* [ value ]
Provide path to directory containing original ("raw") data which
will be reprojected and mosaicked. Not necessary if using mExec to
retrieve remote data from the 2MASS, SDSS or DSS surveys.
*n_tile_x* [ value ]
Number of output tiles to create along the X-axis - default is 1
for a single mosaicked image.
*n_tile_y* [ value ]
Number of output tiles to create along the Y-axis - default is
equal to n_tile_x.
*level_only* [ True | False ]
"Level-only" option (see mBgModel)
*keep* [ True | False ]
If retrieving data from a remote archive, the "keep" option will
leave the original data products on disk after generating a
mosaic. Without this option, raw data will be deleted (unless it
was provided by the user with the "-r" option).
*corners* [ True | False ]
Remove all temporary files and intermediate data products. Note:
if not using the '-o' option to specify an output file, this will
also remove mosaic_image.
*output_image* [ value ]
Provide your own filename for the output mosaic. Default filename
is "mosaic_image."
*debug_level* [ value ]
Print out additional debugging information (levels 1-4)
*region_header* [ value ]
Path to header template used to create mosaic.
*header* [ value ]
Provide header template as text input rather than point to a file;
see sample shell script that makes use of this option.
*workspace_dir* [ value ]
Directory where intermediate files will be created. If no
workspace is given, a unique local subdirectory will be created
            (e.g., ./MOSAIC_AAAaa17v)
'''
command = "mExec"
if raw_dir:
command += " -r %s" % str(raw_dir)
if n_tile_x:
command += " -n %s" % str(n_tile_x)
if n_tile_y:
command += " -m %s" % str(n_tile_y)
if level_only:
command += " -l"
if keep:
command += " -k"
if corners:
command += " -c"
if output_image:
command += " -o %s" % str(output_image)
if debug_level:
command += " -d %s" % str(debug_level)
if region_header:
command += " -f %s" % str(region_header)
if header:
command += " -h %s" % str(header)
command += " " + str(survey)
command += " " + str(band)
if workspace_dir:
command += " %s" % str(workspace_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mExec", p.stdout.read().strip())
def mFitExec(diffs_table, fits_table, diff_dir, debug=False, status_file=None,
mpi=False, n_proc=8):
'''
Runs mFitplane on all the difference images identified by mOverlaps and
generated by mDiff or mDiffExec. mFitExec creates a table of image-to-
image difference parameters.
Required Arguments:
*diffs_table* [ value ]
Overlap table generated by mOverlaps, the last column of which
contains the filenames of the difference images generated by
mDiffExec.
*fits_table* [ value ]
            Output table of difference parameters.
*diff_dir* [ value ]
Directory containing difference images.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging
*status_file* [ value ]
Writes output message to status_file instead of to stdout
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mFitExecMPI" % n_proc
else:
command = "mFitExec"
if debug:
command += " -d"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(diffs_table)
command += " " + str(fits_table)
command += " " + str(diff_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mFitExec", p.stdout.read().strip())
def mFitplane(in_image, border=None, debug_level=None, status_file=None):
'''
Uses least squares to fit a plane (excluding outlier pixels) to an image.
It is used on the difference images generated using mDiff or mDiffExec.
Required Arguments:
*in_image* [ value ]
Input FITS file is a difference file between two other FITS files,
as can be generated using mDiff.
Optional Arguments:
*border* [ value ]
Number of border pixels to ignore at edges of image.
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
Output and errors are written to status_file instead of stdout.
'''
command = "mFitplane"
if border:
command += " -b %s" % str(border)
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mFitplane", p.stdout.read().strip())
def mFixNan(in_image, out_image, debug_level=None, nan_value=None,
min_blank=None, max_blank=None):
'''
Converts NaNs found in the image to some other value (given by the user),
or converts a range of supplied values into NaNs.
Required Arguments:
*in_image* [ value ]
Input FITS image file
*out_image* [ value ]
Path of output FITS file. To run in "count" mode without creating
an output file, use a dash ("-") for this argument.
Optional Arguments:
*debug_level* [ value ]
Turn on debugging to the specified level (1-3)
*nan_value* [ value ]
            Value to use in place of any NaNs
*min_blank, max_blank* [ value ]
If the nan_value option is not used, mFixNaN will replace all
pixel values between min_blank and max_blank with NaN.
'''
command = "mFixNan"
if debug_level:
command += " -d %s" % str(debug_level)
if nan_value:
command += " -v %s" % str(nan_value)
command += " " + str(in_image)
command += " " + str(out_image)
if min_blank and max_blank:
command += " %s" % str(min_blank)
command += " %s" % str(max_blank)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mFixNan", p.stdout.read().strip())
def mFlattenExec(images_table, flat_dir, img_dir=None, debug=False,
no_area=False, status_file=None):
'''
Runs both mFitPlane and mBackground on a set of images.
Required Arguments:
*images_table* [ value ]
Metadata table (generated by mImgtbl) describing images to be
flattened.
*flat_dir* [ value ]
Path to directory where output files should be created.
Optional Arguments:
*img_dir* [ value ]
Specifies path to directory containing images to be flattened.
*debug* [ True | False ]
Turns on debugging.
*no_area* [ True | False ]
No-area-images option, indicating that mFlattenExec should not
require area images for all the input FITS images.
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mFlattenExec"
if img_dir:
command += " -p %s" % str(img_dir)
if debug:
command += " -d"
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(flat_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mFlattenExec", p.stdout.read().strip())
def mGetHdr(in_image, img_header, debug=False, hdu=None, status_file=None):
'''
Reads in the header from a FITS image and prints it out to a text file.
Required Arguments:
*in_image* [ value ]
Path to FITS image from which to retrieve the header.
*img_header* [ value ]
Path to text file where FITS header should be written.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging.
*hdu* [ value ]
            Retrieve the header from the FITS extension given by hdu. "0"
indicates the primary FITS extension, and is the default used by
mGetHdr.
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mGetHdr"
if debug:
command += " -d"
if hdu:
command += " -h %s" % str(hdu)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(img_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mGetHdr", p.stdout.read().strip())
def mHdr(object_or_location, width, out_file, system=None, equinox=None,
height=None, pix_size=None, rotation=None):
'''
Connects to the IRSA service HdrTemplate to create a header template based
on a location, size, resolution and rotation.
Required Arguments:
*object_or_location* [ value ]
Object string or coordinate location
*width* [ value ]
Width (x-axis) of area
*out_file* [ value ]
Path to output header template
Optional Arguments:
*system* [ value ]
Specify a coordinate system. Can be one of: "equatorial" or "eq"
(default), "ecliptic" or "ec" "galactic", "ga", "supergalactic" or
"sgal"
*equinox* [ value ]
Specify an equinox. Default is 2000.0
*height* [ value ]
Height (y-axis) of area in degrees. Default is equal to width
*pix_size* [ value ]
Size of a pixel (in arcsec); default is 1
*rotation* [ value ]
Rotation of image; default is 0
'''
command = "mHdr"
if system:
command += " -s %s" % str(system)
if equinox:
command += " -e %s" % str(equinox)
if height:
command += " -h %s" % str(height)
if pix_size:
command += " -p %s" % str(pix_size)
if rotation:
command += " -r %s" % str(rotation)
command += ' "' + str(object_or_location) + '"'
command += " " + str(width)
command += " " + str(out_file)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mHdr", p.stdout.read().strip())
def mHdrCheck(in_image, status_file=None):
'''
mHdrCheck reads in the header from a FITS image (or an ASCII header
template file) and checks to see if any header lines are invalid. If it
finds one, it will print out a message stating which keyword is invalid
and exit before checking the rest of the header. It will not report on
    multiple invalid values. If all values are correct, mHdrCheck will print
out a "Valid FITS/WCS" message.
Required Arguments:
*in_image* [ value ]
Path of FITS file to be validated.
Optional Arguments:
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mHdrCheck"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mHdrCheck", p.stdout.read().strip())
def mHdrtbl(directory, images_table, recursive=False, corners=False,
debug=False, output_invalid=False, status_file=None, img_list=None):
'''
mHdrtbl operates in a fashion similar to mImgtbl, but is used on a set of
header template files instead of FITS images.
Required Arguments:
*directory* [ value ]
Path to directory containing set of input header templates.
*images_table* [ value ]
Path of output metadata table.
Optional Arguments:
*recursive* [ True | False ]
mHdrtbl can also be used as a standalone program to gather image
metadata for other purposes (to populate a database, as a basis
for spatial coverage searches, etc.) In this case it is often
desirable to collect information on all the files in a directory
tree recursively. The recursive option instructs mHdrtbl to search
the given directory and all its subdirectories recursively.
*corners* [ True | False ]
The corners option in mHdrtbl will cause eight extra columns to be
added to the output metadata table containing the RA, Dec
coordinates (ra1, dec1, ... ra4, dec4) of the image corners. The
output is always Equatorial J2000, even if the input is some other
system. This has been done to make the metadata uniform so that it
can easily be used for coverage searches, etc. The corners option
is not needed for normal Montage processing.
*debug* [ True | False ]
Turn on debugging
*output_invalid* [ True | False ]
When this option is set, mHdrtbl will explicitly output each
header file it finds that does not appear to be valid, along with
information on the error.
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*img_list* [ value ]
mHdrtbl will only process files with names specified in table
            img_list, ignoring any other files in the directory.
'''
command = "mHdrtbl"
if recursive:
command += " -r"
if corners:
command += " -c"
if debug:
command += " -d"
if output_invalid:
command += " -b"
if status_file:
command += " -s %s" % str(status_file)
if img_list:
command += " -t %s" % str(img_list)
command += " " + str(directory)
command += " " + str(images_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mHdrtbl", p.stdout.read().strip())
def mImgtbl(directory, images_table, recursive=False, corners=False,
include_area=False, debug=False, output_invalid=False,
status_file=None, fieldlist=None, img_list=None):
'''
mImgtbl extracts the FITS header geometry information from a set of files
and creates an ASCII image metadata table which is used by several of the
other programs. It only collects data from headers that comply with the
FITS standard, but reports a count of images that fail that check.
Required Arguments:
*directory* [ value ]
Path to directory containing set of input FITS files.
*images_table* [ value ]
Path of output metadata table.
Optional Arguments:
*recursive* [ True | False ]
mImgtbl can also be used as a standalone program to gather image
metadata for other purposes (to populate a database, as a basis
for spatial coverage searches, etc.) In this case it is often
desirable to collect information on all the files in a directory
tree recursively. The recursive option instructs mImgtbl to search
the given directory and all its subdirectories recursively.
*corners* [ True | False ]
The corners option in mImgtbl will cause eight extra columns to be
added to the output metadata table containing the RA, Dec
coordinates (ra1, dec1, ... ra4, dec4) of the image corners. The
output is always Equatorial J2000, even if the input is some other
system. Though not required for the core processing modules, we
recommend using this option, as some of the utilities may require
corner locations to function properly.
*include_area* [ True | False ]
By default, mImgtbl ignores FITS files with names ending in _area
(i.e. name_area_image), assuming them to be Montage-created area
images. If you want to generate information on these images, or if
you have images with _area in the title other than those generated
by Montage, you should turn on this option to force mImgtbl to
look at all images in the directory.
*debug* [ True | False ]
Turn on debugging
*output_invalid* [ True | False ]
When this option is set, mImgtbl will explicitly output each FITS
file it finds that does not appear to be valid, along with
information on the error.
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*fieldlist* [ value ]
Used to specify a fieldlist, which will list additional keywords
to be read from the FITS headers and included in the output table.
Fieldlists should specify the keyword name, type
(int,char,double), and size.
*img_list* [ value ]
mImgtbl will only process files with names specified in table
            img_list, ignoring any other files in the directory.
'''
command = "mImgtbl"
if recursive:
command += " -r"
if corners:
command += " -c"
if include_area:
command += " -a"
if debug:
command += " -d"
if output_invalid:
command += " -b"
if status_file:
command += " -s %s" % str(status_file)
if fieldlist:
command += " -f %s" % str(fieldlist)
if img_list:
command += " -t %s" % str(img_list)
command += " " + str(directory)
command += " " + str(images_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mImgtbl", p.stdout.read().strip())
def mMakeHdr(images_table, template_header, debug_level=None,
status_file=None, cdelt=None, north_aligned=False, system=None,
equinox=None):
'''
From a list of images to be mosaicked together, mMakeHdr generates the
FITS header that best describes the output image.
Required Arguments:
*images_table* [ value ]
Metadata table (generated by mImgtbl) describing the images to be
mosaicked.
*template_header* [ value ]
Path to header template to be generated.
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
Output and errors are written to status_file instead of to stdout.
*cdelt* [ value ]
Specify a pixel scale for the header, if different from the input
images
*north_aligned* [ True | False ]
"North-aligned" option. By default, the FITS header generated
represents the best fit to the images, often resulting in a slight
rotation. If you want north to be straight up in your final
mosaic, you should use this option.
*system* [ value ]
Specifies the system for the header (default is Equatorial).
Possible values are: EQUJ EQUB ECLJ ECLB GAL SGAL
*equinox* [ value ]
If a coordinate system is specified, the equinox can also be given
in the form YYYY. Default is J2000.
'''
command = "mMakeHdr"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
if cdelt:
command += " -p %s" % str(cdelt)
if north_aligned:
command += " -n"
command += " " + str(images_table)
command += " " + str(template_header)
if system:
command += " %s" % str(system)
if equinox:
command += " %s" % str(equinox)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mMakeHdr", p.stdout.read().strip())
def mOverlaps(images_table, diffs_table, exact=False, debug_level=None,
status_file=None):
'''
Analyze an image metadata table to determine a list of overlapping images.
Each image is compared with every other image to determine all overlapping
image pairs. A pair of images are deemed to overlap if any pixel around
the perimeter of one image falls within the boundary of the other image.
Required Arguments:
*images_table* [ value ]
Table of image metadata generated by mImgtbl.
*diffs_table* [ value ]
Path of output table to be generated containing overlap
information.
Optional Arguments:
*exact* [ True | False ]
Enables 'exact' overlaps mode, as opposed to the default
approximate algorithm. The default mode uses great-circle
connecting lines between image corners to determine which images
overlap. Exact mode will instead check the edge pixels of every
image to determine which pixels are inside the others. Although
the default mode will occasionally report some incorrect overlaps,
this is not a concern since mDiff will detect and ignore these
false positive results when processing the table.
*debug_level* [ value ]
Turns on debugging to the specified level (1 or 2)
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mOverlaps"
if exact:
command += " -e"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(diffs_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mOverlaps", p.stdout.read().strip())
def mPix2Coord(template_header, ixpix, jypix, debug=False):
'''
Takes an image FITS header template and a pixel (x,y) coordinate, and
outputs the corresponding sky location.
Required Arguments:
*template_header* [ value ]
ASCII header template describing the image (either a FITS image,
or a JPEG file created from the FITS file)
*ixpix* [ value ]
X coordinate (pixel location) on image
*jypix* [ value ]
Y coordinate (pixel location) on image
Optional Arguments:
*debug* [ True | False ]
Print out additional debugging information
'''
command = "mPix2Coord"
if debug:
command += " -d"
command += " " + str(template_header)
command += " " + str(ixpix)
command += " " + str(jypix)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mPix2Coord", p.stdout.read().strip())
def mProject(in_image, out_image, template_header, factor=None,
debug_level=None, status_file=None, hdu=None, scale=None,
weight_file=None, threshold=None, whole=False):
'''
mProject reprojects a single image to the scale defined in a FITS header
    template file. The program produces a pair of images: the reprojected
    image and an "area" image consisting of the fraction of input pixel sky
    area that went into each output pixel. The "drizzle" algorithm is
    implemented. The algorithm proceeds by
mapping pixel corners (as adjusted by drizzle, if called) from the input
pixel space to the output pixel space, calculating overlap area with each
output pixel, and accumulating an appropriate fraction of the input flux
into the output image pixels. In addition, the appropriate fraction of
the input pixel area is accumulated into the area image pixels.
Projection of points from input pixel space to output pixel space is
calculated in two steps: first map from input pixel space to sky
coordinates; second map from sky coordinates to output pixel space.
Required Arguments:
*in_image* [ value ]
Input FITS file to be reprojected.
*out_image* [ value ]
Path of output FITS file to be created.
*template_header* [ value ]
FITS header template to be used in generation of output image
Optional Arguments:
*factor* [ value ]
Processing is done utilizing the drizzle algorithm. factor is a
floating point number; recommended drizzle factors are from 0.5 to
1.
*debug_level* [ value ]
Causes additional debugging information to be printed to stdout.
Valid levels are 1-5 (for higher debugging levels, it is
recommended to redirect the output to a file).
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*hdu* [ value ]
Use the specified FITS extension (default is to use the first HDU
with image data)
*scale* [ value ]
Apply a correction factor of scale to each pixel
*weight_file* [ value ]
Path to a weight map to be used when reading values from the input
image.
*threshold* [ value ]
Pixels with weights below threshold will be treated as blank.
*whole* [ True | False ]
Makes the output region (originally defined in the header
template) big enough to include all of the input images
'''
command = "mProject"
if factor:
command += " -z %s" % str(factor)
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
if hdu:
command += " -h %s" % str(hdu)
if scale:
command += " -x %s" % str(scale)
if weight_file:
command += " -w %s" % str(weight_file)
if threshold:
command += " -t %s" % str(threshold)
if whole:
command += " -X"
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mProject", p.stdout.read().strip())
def mProjectPP(in_image, out_image, template_header, factor=None,
debug_level=None, border=None, status_file=None,
alternate_header=None, hdu=None, scale=None, weight_file=None,
threshold=None, whole=False):
'''
mProjectPP reprojects a single image to the scale defined in an alternate
FITS header template generated (usually) by mTANhdr. The program produces
a pair of images: the reprojected image and an "area" image consisting of
    the fraction of input pixel sky area that went into each output pixel. This
area image goes through all the subsequent processing that the reprojected
image does, allowing it to be properly coadded at the end.
Required Arguments:
*in_image* [ value ]
Input FITS file to be reprojected.
*out_image* [ value ]
Path to output FITS file to be created.
*template_header* [ value ]
FITS header template to be used in generation of output FITS
Optional Arguments:
*factor* [ value ]
Processing is done utilizing the drizzle algorithm. factor is a
floating point number; recommended drizzle factors are from 0.5 to
1.
*debug_level* [ value ]
Causes additional debugging information to be printed to stdout.
Valid levels are 1-5; for levels greater than 1, it's recommended
to redirect the output into a text file.
*border* [ value ]
Ignores border pixels around the image edge when performing
calculations.
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*alternate_header* [ value ]
Specifies an alternate FITS header for use in mProjectPP
            calculations; allows substitution of pseudo-TAN headers created by
mTANHdr.
*hdu* [ value ]
Specify the FITS extension to re-project if the FITS image is
multi-extension.
*scale* [ value ]
            Multiply the pixel values by scale when reprojecting. For
instance, each 2MASS image has a different scale factor (very near
1.0) to correct for varying magnitude-zero points.
*weight_file* [ value ]
Path to a weight map to be used when reading values from the input
image.
*threshold* [ value ]
If using a weight image; only use those pixels where the weight
value is above threshold.
*whole* [ True | False ]
Reproject the whole image even if part of it is outside the region
of interest (don't crop while re-projecting).
'''
command = "mProjectPP"
if factor:
command += " -z %s" % str(factor)
if debug_level:
command += " -d %s" % str(debug_level)
if border:
command += " -b %s" % str(border)
if status_file:
command += " -s %s" % str(status_file)
    if alternate_header:
        # Note: mProjectPP takes either "-i" or "-o" here, depending on
        # whether the alternate header describes the input or the output
        # image; the literal "-[i|o]" placeholder below is passed to the
        # command line unchanged, which appears to be carried over from the
        # usage string rather than being a valid flag.
        command += " -[i|o] %s" % str(alternate_header)
if hdu:
command += " -h %s" % str(hdu)
if scale:
command += " -x %s" % str(scale)
if weight_file:
command += " -w %s" % str(weight_file)
if threshold:
command += " -t %s" % str(threshold)
if whole:
command += " -X"
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mProjectPP", p.stdout.read().strip())
def mProjExec(images_table, template_header, proj_dir, stats_table,
raw_dir=None, debug=False, exact=False, whole=False, border=None,
restart_rec=None, status_file=None, scale_column=None, mpi=False,
n_proc=8):
'''
An executive which runs mProject (or, if possible for the input/output
projections, mProjectPP) for each image in an image metadata table.
Required Arguments:
*images_table* [ value ]
ASCII table (generated by mImgtbl) containing metadata for all
images to be reprojected.
*template_header* [ value ]
FITS header template to be used in generation of output FITS.
*proj_dir* [ value ]
Directory in which to create reprojected images.
*stats_table* [ value ]
Name of table for output statistics (time of each reprojection, or
error messages).
Optional Arguments:
*raw_dir* [ value ]
Specifies the path to the directory containing the images to be
reprojected. If the -p option is not included, mProjExec looks for
the images in the current working directory.
*debug* [ True | False ]
Turns on debugging
*exact* [ True | False ]
Flag indicating output image should exactly match the FITS header
template, and not crop off blank pixels
*whole* [ True | False ]
Force reprojection of whole images, even if they exceed the area
of the FITS header template
*border* [ value ]
Ignore border width of pixels around edge of images
*restart_rec* [ value ]
Allows restart at record number restart_rec, if mProjExec exits
upon an error
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*scale_column* [ value ]
Turn on flux rescaling (e.g. magnitude zero point correction):
scale_column is the name of a column in images_table which
contains scale information.
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mProjExecMPI" % n_proc
else:
command = "mProjExec"
if raw_dir:
command += " -p %s" % str(raw_dir)
if debug:
command += " -d"
if exact:
command += " -e"
if whole:
command += " -X"
if border:
command += " -b %s" % str(border)
if restart_rec:
command += " -r %s" % str(restart_rec)
if status_file:
command += " -s %s" % str(status_file)
if scale_column:
command += " -x %s" % str(scale_column)
command += " " + str(images_table)
command += " " + str(template_header)
command += " " + str(proj_dir)
command += " " + str(stats_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mProjExec", p.stdout.read().strip())
def mPutHdr(in_image, out_image, template_header, debug=False,
status_file=None, hdu=None):
'''
Replaces the header of the input file with one supplied by the user.
Required Arguments:
*in_image* [ value ]
Input FITS file.
*out_image* [ value ]
Path to output FITS file (with new header)
*template_header* [ value ]
ASCII header template to write into out_image.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging to the specified level (this version only
supports level "1").
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*hdu* [ value ]
            Write to the specified FITS extension (HDU).
'''
command = "mPutHdr"
if debug:
command += " -d"
if status_file:
command += " -s %s" % str(status_file)
if hdu:
command += " -h %s" % str(hdu)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mPutHdr", p.stdout.read().strip())
def mRotate(in_image, out_image, debug_level=None, status_file=None,
rotation_angle=None, ra=None, dec=None, xsize=None, ysize=None):
'''
Rotates a FITS image by an arbitrary angle. This module is meant for
quick-look only; it is not flux conserving.
Required Arguments:
*in_image* [ value ]
Input FITS image.
*out_image* [ value ]
Path to output (rotated) FITS image.
Optional Arguments:
*debug_level* [ value ]
Print out additional debugging information (level can be 1-3)
*status_file* [ value ]
Output and errors are written to status_file instead of stdout.
*rotation_angle* [ value ]
Provide an angle (in degrees) to rotate the image.
*ra, dec, xsize* [ value ]
Center location and width (in degrees) of output image - optional.
By default, entire input image area will be included in output
image.
*ysize* [ value ]
Height (in degrees) of output image, if a new center location and
width are provided. Only used if ra, dec, and xsize are specified.
Defaults to xsize.
'''
command = "mRotate"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
if rotation_angle:
command += " -r %s" % str(rotation_angle)
command += " " + str(in_image)
command += " " + str(out_image)
if ra and dec and xsize:
command += " %s" % str(ra)
command += " %s" % str(dec)
command += " %s" % str(xsize)
if ysize:
command += " %s" % str(ysize)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mRotate", p.stdout.read().strip())
def mShrink(in_image, out_image, factor, fixed_size=False, debug_level=None,
status_file=None):
'''
A utility for reducing the size of a FITS file, by averaging blocks of
pixels.
Required Arguments:
*in_image* [ value ]
Input FITS file
*out_image* [ value ]
Path to output FITS file.
*factor* [ value ]
Size of blocks, in pixels, to average. File size will be reduced
by 1/factor squared. If the fixed_size option is used, factor is
the desired width of the output image.
Optional Arguments:
*fixed_size* [ True | False ]
Fixed-size option - specify output size of image, instead of the
size of blocks of pixels to be averaged
*debug_level* [ value ]
Turns on debugging to the specified level (1-4).
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mShrink"
if fixed_size:
command += " -f"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(factor)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mShrink", p.stdout.read().strip())
def mSubimage(in_image, out_image, ra, dec, xsize, debug=False,
all_pixels=False, hdu=None, status_file=None, ysize=None):
'''
Creates a subimage (or "cutout") of a FITS file. To use mSubimage in
'pixel' mode, see mSubimage_pix
Required Arguments:
*in_image* [ value ]
Input FITS file.
*out_image* [ value ]
Path to output FITS file.
*ra* [ value ]
RA of center of output image.
*dec* [ value ]
Declination of center of output image.
*xsize* [ value ]
Width of output image in degrees.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging.
*all_pixels* [ True | False ]
All pixels - Force retrieval of whole image (useful to extract an
entire HDU)
*hdu* [ value ]
Operate on the specified FITS header extension (HDU)
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*ysize* [ value ]
            Height of output image in degrees (default is equal to xsize).
'''
command = "mSubimage"
if debug:
command += " -d"
if all_pixels:
command += " -a"
if hdu:
command += " -h %s" % str(hdu)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(ra)
command += " " + str(dec)
command += " " + str(xsize)
if ysize:
command += " %s" % str(ysize)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mSubimage", p.stdout.read().strip())
def mSubimage_pix(in_image, out_image, xstartpix, ystartpix, xpixsize,
debug=False, hdu=None, status_file=None, ypixsize=None):
'''
Creates a subimage (or "cutout") of a FITS file ('pixel' mode)
Required Arguments:
*in_image* [ value ]
Input FITS file.
*out_image* [ value ]
Path to output FITS file.
*xstartpix* [ value ]
Pixel along the x-axis where the cutout image will begin
*ystartpix* [ value ]
Pixel along the y-axis where the cutout image will begin
*xpixsize* [ value ]
Width of output image in pixels
Optional Arguments:
*debug* [ True | False ]
Turns on debugging.
*hdu* [ value ]
Operate on the specified FITS header extension (HDU)
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*ypixsize* [ value ]
            Height of output image in pixels (default is equal to xpixsize).
'''
command = "mSubimage -p"
if debug:
command += " -d"
if hdu:
command += " -h %s" % str(hdu)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(xstartpix)
command += " " + str(ystartpix)
command += " " + str(xpixsize)
if ypixsize:
command += " %s" % str(ypixsize)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mSubimage_pix", p.stdout.read().strip())
def mSubset(images_table, template_header, subset_table, debug_level=None,
fast_mode=False, status_file=None):
'''
Generates a table of images that is a subset of the input table,
containing only those images that cover the area defined by a given FITS
header.
Required Arguments:
*images_table* [ value ]
ASCII table (generated by mImgtbl) containing metadata for image
collection.
*template_header* [ value ]
FITS header template defining the area of interest.
*subset_table* [ value ]
Path to output table, which will contain only those FITS images
covering the area defined by template_header.
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*fast_mode* [ True | False ]
Fast mode - input file must include corners (corners option in
mImgtbl) to utilize this mode.
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mSubset"
if debug_level:
command += " -d %s" % str(debug_level)
if fast_mode:
command += " -f"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(template_header)
command += " " + str(subset_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mSubset", p.stdout.read().strip())
def mTANHdr(orig_header, new_header, debug=False, order=None, max_iter=None,
tolerance=None, status_file=None):
'''
Analyzes a template file and determines if there would be an adequate
equivalent distorted TAN projection, within a specified tolerance, and
outputs the alternate header. This header can be used in conjunction with
mProjectPP to produce a TAN plane image. This process is considerably
faster than projecting with the general purpose tool mProject.
Required Arguments:
*orig_header* [ value ]
Input FITS header
*new_header* [ value ]
Path to output header to be created
Optional Arguments:
*debug* [ True | False ]
Print additional debugging information to stdout.
*order* [ value ]
Order of output header polynomial focal plane distortions (default
= 4)
*max_iter* [ value ]
Maximum number of iteration attempts to produce header (default =
50)
*tolerance* [ value ]
Distortion tolerance value for acceptable output (default = 0.01)
*status_file* [ value ]
Output and errors are written to status_file instead of stdout.
'''
command = "mTANHdr"
if debug:
command += " -d"
if order:
command += " -o %s" % str(order)
if max_iter:
command += " -i %s" % str(max_iter)
if tolerance:
command += " -t %s" % str(tolerance)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(orig_header)
command += " " + str(new_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mTANHdr", p.stdout.read().strip())
def mTblSort(in_table, column_name, out_table, debug=False):
'''
Sorts a table on numeric values.
Required Arguments:
*in_table* [ value ]
Path to input table
*column_name* [ value ]
Name of column to sort on (column must contain numeric values)
*out_table* [ value ]
Path to output table
Optional Arguments:
*debug* [ True | False ]
Turns on debugging
'''
command = "mTblSort"
if debug:
command += " -d"
command += " " + str(in_table)
command += " " + str(column_name)
command += " " + str(out_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mTblSort", p.stdout.read().strip())
def mTileHdr(orig_header, new_header, n_x, n_y, ix, iy, debug=False,
status_file=None, xpad=None, ypad=None):
'''
Takes a header template file and creates another which represents one of a
regular set of tiles covering the original. The user specifies the tile
gridding and which tile is desired.
Required Arguments:
*orig_header* [ value ]
ASCII header template from which to derive tiled headers
*new_header* [ value ]
Path to output header
*n_x* [ value ]
Number of tiles in the x-direction
*n_y* [ value ]
Number of tiles in the y-direction
*ix* [ value ]
Integer identifying the x location of the output tile on the grid
(counting from 0)
*iy* [ value ]
Integer identifying the y location of the output tile on the grid
(counting from 0)
Optional Arguments:
*debug* [ True | False ]
Turns on debugging.
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*xpad* [ value ]
Number of pixels to overlap tiles in the x direction (default is
0)
*ypad* [ value ]
Number of pixels to overlap tiles in the y direction (default is
0). Only used if xpad is present.
'''
command = "mTileHdr"
if debug:
command += " -d"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(orig_header)
command += " " + str(new_header)
command += " " + str(n_x)
command += " " + str(n_y)
command += " " + str(ix)
command += " " + str(iy)
if xpad:
command += " %s" % str(xpad)
if ypad:
command += " %s" % str(ypad)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mTileHdr", p.stdout.read().strip())
| mit | 3,596,807,204,537,435,000 | 32.998728 | 78 | 0.601075 | false |
thomdixon/elastalert | elastalert/elastalert.py | 1 | 54691 | # -*- coding: utf-8 -*-
import copy
import datetime
import json
import logging
import sys
import time
import traceback
from email.mime.text import MIMEText
from smtplib import SMTP
from smtplib import SMTPException
from socket import error
import argparse
import kibana
from alerts import DebugAlerter
from config import get_rule_hashes
from config import load_configuration
from config import load_rules
from elasticsearch.client import Elasticsearch
from elasticsearch.exceptions import ElasticsearchException
from enhancements import DropMatchException
from util import dt_to_ts
from util import EAException
from util import format_index
from util import pretty_ts
from util import seconds
from util import ts_add
from util import ts_now
from util import ts_to_dt
class ElastAlerter():
""" The main Elastalert runner. This class holds all state about active rules,
controls when queries are run, and passes information between rules and alerts.
:param args: An argparse arguments instance. Should contain debug and start
:param conf: The configuration dictionary. At the top level, this
contains global options, and under 'rules', contains all state relating
to rules and alerts. In each rule in conf['rules'], the RuleType and Alerter
instances live under 'type' and 'alerts', respectively. The conf dictionary
should not be passed directly from a configuration file, but must be populated
by config.py:load_rules instead. """
def parse_args(self, args):
parser = argparse.ArgumentParser()
parser.add_argument('--config', action='store', dest='config', default="config.yaml", help='Global config file (default: config.yaml)')
parser.add_argument('--debug', action='store_true', dest='debug', help='Suppresses alerts and prints information instead')
parser.add_argument('--rule', dest='rule', help='Run only a specific rule (by filename, must still be in rules folder)')
parser.add_argument('--silence', dest='silence', help='Silence rule for a time period. Must be used with --rule. Usage: '
'--silence <units>=<number>, eg. --silence hours=2')
parser.add_argument('--start', dest='start', help='YYYY-MM-DDTHH:MM:SS Start querying from this timestamp. (Default: present)')
parser.add_argument('--end', dest='end', help='YYYY-MM-DDTHH:MM:SS Query to this timestamp. (Default: present)')
parser.add_argument('--verbose', action='store_true', dest='verbose', help='Increase verbosity without suppressing alerts')
parser.add_argument('--pin_rules', action='store_true', dest='pin_rules', help='Stop ElastAlert from monitoring config file changes')
parser.add_argument('--es_debug', action='store_true', dest='es_debug', help='Enable verbose logging from Elasticsearch queries')
parser.add_argument('--es_debug_trace', action='store', dest='es_debug_trace', default="/tmp/es_trace.log", help='Enable logging from Elasticsearch queries as curl command. Queries will be logged to file (default: /tmp/es_trace.log)')
self.args = parser.parse_args(args)
def __init__(self, args):
self.parse_args(args)
self.debug = self.args.debug
self.verbose = self.args.verbose
if self.debug:
self.verbose = True
if self.verbose:
logging.getLogger().setLevel(logging.INFO)
if not self.args.es_debug:
logging.getLogger('elasticsearch').setLevel(logging.WARNING)
if self.args.es_debug_trace:
tracer = logging.getLogger('elasticsearch.trace')
tracer.setLevel(logging.INFO)
tracer.addHandler(logging.FileHandler(self.args.es_debug_trace))
self.conf = load_rules(self.args)
self.max_query_size = self.conf['max_query_size']
self.rules = self.conf['rules']
self.writeback_index = self.conf['writeback_index']
self.run_every = self.conf['run_every']
self.alert_time_limit = self.conf['alert_time_limit']
self.old_query_limit = self.conf['old_query_limit']
self.disable_rules_on_error = self.conf['disable_rules_on_error']
self.notify_email = self.conf.get('notify_email')
self.from_addr = self.conf.get('from_addr', 'ElastAlert')
self.smtp_host = self.conf.get('smtp_host', 'localhost')
self.alerts_sent = 0
self.num_hits = 0
self.current_es = None
self.current_es_addr = None
self.buffer_time = self.conf['buffer_time']
self.silence_cache = {}
self.rule_hashes = get_rule_hashes(self.conf, self.args.rule)
self.starttime = self.args.start
self.disabled_rules = []
self.es_conn_config = self.build_es_conn_config(self.conf)
self.writeback_es = self.new_elasticsearch(self.es_conn_config)
for rule in self.rules:
rule = self.init_rule(rule)
if self.args.silence:
self.silence()
@staticmethod
def new_elasticsearch(es_conn_conf):
""" returns an Elasticsearch instance configured using an es_conn_config """
return Elasticsearch(host=es_conn_conf['es_host'],
port=es_conn_conf['es_port'],
url_prefix=es_conn_conf['es_url_prefix'],
use_ssl=es_conn_conf['use_ssl'],
http_auth=es_conn_conf['http_auth'],
timeout=es_conn_conf['es_conn_timeout'])
@staticmethod
def build_es_conn_config(conf):
""" Given a conf dictionary w/ raw config properties 'use_ssl', 'es_host', 'es_port'
'es_username' and 'es_password', this will return a new dictionary
with properly initialized values for 'es_host', 'es_port', 'use_ssl' and 'http_auth' which
will be a basicauth username:password formatted string """
parsed_conf = {}
parsed_conf['use_ssl'] = False
parsed_conf['http_auth'] = None
parsed_conf['es_username'] = None
parsed_conf['es_password'] = None
parsed_conf['es_host'] = conf['es_host']
parsed_conf['es_port'] = conf['es_port']
parsed_conf['es_url_prefix'] = ''
parsed_conf['es_conn_timeout'] = 10
if 'es_username' in conf:
parsed_conf['es_username'] = conf['es_username']
parsed_conf['es_password'] = conf['es_password']
if parsed_conf['es_username'] and parsed_conf['es_password']:
parsed_conf['http_auth'] = parsed_conf['es_username'] + ':' + parsed_conf['es_password']
if 'use_ssl' in conf:
parsed_conf['use_ssl'] = conf['use_ssl']
if 'es_conn_timeout' in conf:
parsed_conf['es_conn_timeout'] = conf['es_conn_timeout']
if 'es_url_prefix' in conf:
parsed_conf['es_url_prefix'] = conf['es_url_prefix']
return parsed_conf
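    # Example (illustrative, hypothetical credentials): a conf dict such as
    #     {'es_host': 'localhost', 'es_port': 9200, 'use_ssl': True,
    #      'es_username': 'elastic', 'es_password': 'secret'}
    # is parsed into
    #     {'es_host': 'localhost', 'es_port': 9200, 'use_ssl': True,
    #      'http_auth': 'elastic:secret', 'es_username': 'elastic',
    #      'es_password': 'secret', 'es_url_prefix': '', 'es_conn_timeout': 10}
    # which is exactly the shape new_elasticsearch() above expects.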
@staticmethod
def get_index(rule, starttime=None, endtime=None):
""" Gets the index for a rule. If strftime is set and starttime and endtime
        are provided, it will return a comma separated list of indices. If strftime
is set but starttime and endtime are not provided, it will replace all format
tokens with a wildcard. """
index = rule['index']
if rule.get('use_strftime_index'):
if starttime and endtime:
return format_index(index, starttime, endtime)
else:
# Replace the substring containing format characters with a *
format_start = index.find('%')
format_end = index.rfind('%') + 2
return index[:format_start] + '*' + index[format_end:]
else:
return index
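    # Example (illustrative): with rule = {'index': 'logstash-%Y.%m.%d',
    # 'use_strftime_index': True}, get_index(rule) with no time bounds returns
    # 'logstash-*', while get_index(rule, starttime, endtime) returns a comma
    # separated list such as 'logstash-2015.01.01,logstash-2015.01.02' built
    # by format_index() for the days covered by the query window.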
@staticmethod
def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field='@timestamp'):
""" Returns a query dict that will apply a list of filters, filter by
start and end time, and sort results by timestamp.
:param filters: A list of elasticsearch filters to use.
:param starttime: A timestamp to use as the start time of the query.
:param endtime: A timestamp to use as the end time of the query.
:param sort: If true, sort results by timestamp. (Default True)
:return: A query dictionary to pass to elasticsearch.
"""
starttime = dt_to_ts(starttime)
endtime = dt_to_ts(endtime)
filters = copy.copy(filters)
query = {'filter': {'bool': {'must': filters}}}
if starttime and endtime:
query['filter']['bool']['must'].append({'range': {timestamp_field: {'from': starttime,
'to': endtime}}})
if sort:
query['sort'] = [{timestamp_field: {'order': 'asc'}}]
return query
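    # Example (illustrative values) of the structure produced: for a single
    # term filter and a one hour window this returns roughly
    #     {'filter': {'bool': {'must': [
    #          {'term': {'level': 'ERROR'}},
    #          {'range': {'@timestamp': {'from': '2015-01-01T00:00:00Z',
    #                                    'to': '2015-01-01T01:00:00Z'}}}]}},
    #      'sort': [{'@timestamp': {'order': 'asc'}}]}
    # with the timestamps converted by dt_to_ts().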
def get_terms_query(self, query, size, field):
""" Takes a query generated by get_query and outputs a aggregation query """
if 'sort' in query:
query.pop('sort')
query.update({'aggs': {'counts': {'terms': {'field': field, 'size': size}}}})
aggs_query = {'aggs': {'filtered': query}}
return aggs_query
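    # Example (illustrative): get_terms_query(query, 5, 'username') drops the
    # sort and wraps the query so that Elasticsearch returns the top 5 values
    # of the field:
    #     {'aggs': {'filtered': {'filter': {...},
    #                            'aggs': {'counts': {'terms': {
    #                                'field': 'username', 'size': 5}}}}}}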
def get_index_start(self, index, timestamp_field='@timestamp'):
""" Query for one result sorted by timestamp to find the beginning of the index.
:param index: The index of which to find the earliest event.
:return: Timestamp of the earliest event.
"""
query = {'sort': {timestamp_field: {'order': 'asc'}}}
try:
res = self.current_es.search(index=index, size=1, body=query, _source_include=[timestamp_field], ignore_unavailable=True)
except ElasticsearchException as e:
self.handle_error("Elasticsearch query error: %s" % (e), {'index': index})
return '1969-12-30T00:00:00Z'
if len(res['hits']['hits']) == 0:
# Index is completely empty, return a date before the epoch
return '1969-12-30T00:00:00Z'
timestamp = res['hits']['hits'][0]['_source'][timestamp_field]
return timestamp
@staticmethod
def process_hits(rule, hits):
""" Process results from Elasticearch. This replaces timestamps with datetime objects
and creates compound query_keys. """
for hit in hits:
hit['_source'][rule['timestamp_field']] = ts_to_dt(hit['_source'][rule['timestamp_field']])
if rule.get('compound_query_key'):
values = [hit['_source'].get(key, 'None') for key in rule['compound_query_key']]
hit['_source'][rule['query_key']] = ', '.join(values)
def get_hits(self, rule, starttime, endtime, index):
""" Query elasticsearch for the given rule and return the results.
:param rule: The rule configuration.
:param starttime: The earliest time to query.
:param endtime: The latest time to query.
:return: A list of hits, bounded by self.max_query_size.
"""
query = self.get_query(rule['filter'], starttime, endtime, timestamp_field=rule['timestamp_field'])
try:
res = self.current_es.search(index=index, size=self.max_query_size, body=query, _source_include=rule['include'], ignore_unavailable=True)
except ElasticsearchException as e:
# Elasticsearch sometimes gives us GIGANTIC error messages
# (so big that they will fill the entire terminal buffer)
if len(str(e)) > 1024:
e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024)
self.handle_error('Error running query: %s' % (e), {'rule': rule['name']})
return None
hits = res['hits']['hits']
self.num_hits += len(hits)
lt = rule.get('use_local_time')
logging.info("Queried rule %s from %s to %s: %s hits" % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), len(hits)))
self.process_hits(rule, hits)
# Record doc_type for use in get_top_counts
if 'doc_type' not in rule and len(hits):
rule['doc_type'] = hits[0]['_type']
return hits
def get_hits_count(self, rule, starttime, endtime, index):
""" Query elasticsearch for the count of results and returns a list of timestamps
equal to the endtime. This allows the results to be passed to rules which expect
an object for each hit.
:param rule: The rule configuration dictionary.
:param starttime: The earliest time to query.
:param endtime: The latest time to query.
:return: A dictionary mapping timestamps to number of hits for that time period.
"""
query = self.get_query(rule['filter'], starttime, endtime, timestamp_field=rule['timestamp_field'], sort=False)
query = {'query': {'filtered': query}}
try:
res = self.current_es.count(index=index, doc_type=rule['doc_type'], body=query, ignore_unavailable=True)
except ElasticsearchException as e:
# Elasticsearch sometimes gives us GIGANTIC error messages
# (so big that they will fill the entire terminal buffer)
if len(str(e)) > 1024:
e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024)
self.handle_error('Error running count query: %s' % (e), {'rule': rule['name']})
return None
self.num_hits += res['count']
lt = rule.get('use_local_time')
logging.info("Queried rule %s from %s to %s: %s hits" % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), res['count']))
return {endtime: res['count']}
def get_hits_terms(self, rule, starttime, endtime, index, key, qk=None, size=None):
rule_filter = copy.copy(rule['filter'])
if qk:
filter_key = rule['query_key']
if rule.get('raw_count_keys', True) and not rule['query_key'].endswith('.raw'):
filter_key += '.raw'
rule_filter.extend([{'term': {filter_key: qk}}])
base_query = self.get_query(rule_filter, starttime, endtime, timestamp_field=rule['timestamp_field'], sort=False)
if size is None:
size = rule.get('terms_size', 50)
query = self.get_terms_query(base_query, size, key)
try:
res = self.current_es.search(index=index, doc_type=rule['doc_type'], body=query, search_type='count', ignore_unavailable=True)
except ElasticsearchException as e:
# Elasticsearch sometimes gives us GIGANTIC error messages
# (so big that they will fill the entire terminal buffer)
if len(str(e)) > 1024:
e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024)
self.handle_error('Error running query: %s' % (e), {'rule': rule['name']})
return None
if 'aggregations' not in res:
return {}
buckets = res['aggregations']['filtered']['counts']['buckets']
self.num_hits += len(buckets)
lt = rule.get('use_local_time')
logging.info('Queried rule %s from %s to %s: %s buckets' % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), len(buckets)))
return {endtime: buckets}
def remove_duplicate_events(self, data, rule):
# Remove data we've processed already
data = [event for event in data if event['_id'] not in rule['processed_hits']]
# Remember the new data's IDs
for event in data:
rule['processed_hits'][event['_id']] = event['_source'][rule['timestamp_field']]
return [event['_source'] for event in data]
def remove_old_events(self, rule):
# Anything older than the buffer time we can forget
now = ts_now()
remove = []
buffer_time = rule.get('buffer_time', self.buffer_time)
for _id, timestamp in rule['processed_hits'].iteritems():
if now - timestamp > buffer_time:
remove.append(_id)
map(rule['processed_hits'].pop, remove)
def run_query(self, rule, start=None, end=None):
""" Query for the rule and pass all of the results to the RuleType instance.
:param rule: The rule configuration.
:param start: The earliest time to query.
:param end: The latest time to query.
Returns True on success and False on failure.
"""
if start is None:
start = self.get_index_start(rule['index'])
if end is None:
end = ts_now()
# Reset hit counter and query
rule_inst = rule['type']
prev_num_hits = self.num_hits
max_size = rule.get('max_query_size', self.max_query_size)
index = self.get_index(rule, start, end)
if rule.get('use_count_query'):
data = self.get_hits_count(rule, start, end, index)
elif rule.get('use_terms_query'):
data = self.get_hits_terms(rule, start, end, index, rule['query_key'])
else:
data = self.get_hits(rule, start, end, index)
if data:
data = self.remove_duplicate_events(data, rule)
# There was an exception while querying
if data is None:
return False
elif data:
if rule.get('use_count_query'):
rule_inst.add_count_data(data)
elif rule.get('use_terms_query'):
rule_inst.add_terms_data(data)
else:
rule_inst.add_data(data)
# Warn if we hit max_query_size
if self.num_hits - prev_num_hits == max_size and not rule.get('use_count_query'):
logging.warning("Hit max_query_size (%s) while querying for %s" % (max_size, rule['name']))
return True
def get_starttime(self, rule):
""" Query ES for the last time we ran this rule.
:param rule: The rule configuration.
:return: A timestamp or None.
"""
query = {'filter': {'term': {'rule_name': '%s' % (rule['name'])}},
'sort': {'@timestamp': {'order': 'desc'}}}
try:
if self.writeback_es:
res = self.writeback_es.search(index=self.writeback_index, doc_type='elastalert_status',
size=1, body=query, _source_include=['endtime', 'rule_name'])
if res['hits']['hits']:
endtime = ts_to_dt(res['hits']['hits'][0]['_source']['endtime'])
if ts_now() - endtime < self.old_query_limit:
return endtime
else:
logging.info("Found expired previous run for %s at %s" % (rule['name'], endtime))
return None
except (ElasticsearchException, KeyError) as e:
self.handle_error('Error querying for last run: %s' % (e), {'rule': rule['name']})
self.writeback_es = None
return None
def set_starttime(self, rule, endtime):
""" Given a rule and an endtime, sets the appropriate starttime for it. """
# This means we are starting fresh
if 'starttime' not in rule:
# Try to get the last run from elasticsearch
last_run_end = self.get_starttime(rule)
if last_run_end:
rule['minimum_starttime'] = last_run_end
rule['starttime'] = last_run_end
return
# Use buffer for normal queries, or run_every increments otherwise
buffer_time = rule.get('buffer_time', self.buffer_time)
if not rule.get('use_count_query') and not rule.get('use_terms_query'):
buffer_delta = endtime - buffer_time
# If we started using a previous run, don't go past that
if 'minimum_starttime' in rule and rule['minimum_starttime'] > buffer_delta:
rule['starttime'] = rule['minimum_starttime']
# If buffer_time doesn't bring us past the previous endtime, use that instead
elif 'previous_endtime' in rule and rule['previous_endtime'] < buffer_delta:
rule['starttime'] = rule['previous_endtime']
else:
rule['starttime'] = buffer_delta
else:
# Query from the end of the last run, if it exists, otherwise a run_every sized window
rule['starttime'] = rule.get('previous_endtime', endtime - self.run_every)
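    # Worked example (illustrative times): with a 45 minute buffer_time and an
    # endtime of 12:00, buffer_delta is 11:15. A normal query rule starts at
    # 11:15 unless the previous run ended even earlier (previous_endtime 11:00
    # would be used instead, so no window is skipped) or minimum_starttime
    # from the last recorded run is later than 11:15, in which case that later
    # time wins. Count/terms rules instead resume from previous_endtime, or
    # one run_every back if there is none.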
def get_segment_size(self, rule):
""" The segment size is either buffer_size for normal queries or run_every for
        count style queries. This mimics the query size for when ElastAlert is running continuously. """
if not rule.get('use_count_query') and not rule.get('use_terms_query'):
return rule.get('buffer_time', self.buffer_time)
return self.run_every
def run_rule(self, rule, endtime, starttime=None):
""" Run a rule for a given time period, including querying and alerting on results.
:param rule: The rule configuration.
:param starttime: The earliest timestamp to query.
:param endtime: The latest timestamp to query.
:return: The number of matches that the rule produced.
"""
run_start = time.time()
rule_es_conn_config = self.build_es_conn_config(rule)
self.current_es = self.new_elasticsearch(rule_es_conn_config)
self.current_es_addr = (rule['es_host'], rule['es_port'])
# If there are pending aggregate matches, try processing them
for x in range(len(rule['agg_matches'])):
match = rule['agg_matches'].pop()
self.add_aggregated_alert(match, rule)
# Start from provided time if it's given
if starttime:
rule['starttime'] = starttime
else:
self.set_starttime(rule, endtime)
rule['original_starttime'] = rule['starttime']
# Don't run if starttime was set to the future
if ts_now() <= rule['starttime']:
logging.warning("Attempted to use query start time in the future (%s), sleeping instead" % (starttime))
return 0
# Run the rule. If querying over a large time period, split it up into segments
self.num_hits = 0
segment_size = self.get_segment_size(rule)
while endtime - rule['starttime'] > segment_size:
tmp_endtime = rule['starttime'] + segment_size
if not self.run_query(rule, rule['starttime'], tmp_endtime):
return 0
rule['starttime'] = tmp_endtime
rule['type'].garbage_collect(tmp_endtime)
if not self.run_query(rule, rule['starttime'], endtime):
return 0
rule['type'].garbage_collect(endtime)
# Process any new matches
num_matches = len(rule['type'].matches)
while rule['type'].matches:
match = rule['type'].matches.pop(0)
# If realert is set, silence the rule for that duration
# Silence is cached by query_key, if it exists
# Default realert time is 0 seconds
# concatenate query_key (or none) with rule_name to form silence_cache key
if 'query_key' in rule:
try:
key = '.' + str(match[rule['query_key']])
except KeyError:
# Some matches may not have a query key
# Use a special token for these to not clobber all alerts
key = '._missing'
else:
key = ''
if self.is_silenced(rule['name'] + key) or self.is_silenced(rule['name']):
logging.info('Ignoring match for silenced rule %s%s' % (rule['name'], key))
continue
if rule['realert']:
next_alert, exponent = self.next_alert_time(rule, rule['name'] + key, ts_now())
self.set_realert(rule['name'] + key, next_alert, exponent)
# If no aggregation, alert immediately
if not rule['aggregation']:
self.alert([match], rule)
continue
# Add it as an aggregated match
self.add_aggregated_alert(match, rule)
# Mark this endtime for next run's start
rule['previous_endtime'] = endtime
time_taken = time.time() - run_start
# Write to ES that we've run this rule against this time period
body = {'rule_name': rule['name'],
'endtime': endtime,
'starttime': rule['original_starttime'],
'matches': num_matches,
'hits': self.num_hits,
'@timestamp': ts_now(),
'time_taken': time_taken}
self.writeback('elastalert_status', body)
return num_matches
def init_rule(self, new_rule, new=True):
        ''' Copies some necessary non-config state from an existing rule to a new rule. '''
if 'download_dashboard' in new_rule['filter']:
# Download filters from kibana and set the rules filters to them
db_filters = self.filters_from_kibana(new_rule, new_rule['filter']['download_dashboard'])
if db_filters is not None:
new_rule['filter'] = db_filters
else:
raise EAException("Could not download filters from %s" % (new_rule['filter']['download_dashboard']))
blank_rule = {'agg_matches': [],
'current_aggregate_id': None,
'processed_hits': {}}
rule = blank_rule
# Set rule to either a blank template or existing rule with same name
if not new:
for rule in self.rules:
if rule['name'] == new_rule['name']:
break
else:
logging.warning("Couldn't find existing rule %s, starting from scratch" % (new_rule['name']))
rule = blank_rule
copy_properties = ['agg_matches',
'current_aggregate_id',
'processed_hits',
'starttime']
for prop in copy_properties:
if prop == 'starttime' and 'starttime' not in rule:
continue
new_rule[prop] = rule[prop]
return new_rule
def load_rule_changes(self):
''' Using the modification times of rule config files, syncs the running rules
to match the files in rules_folder by removing, adding or reloading rules. '''
rule_hashes = get_rule_hashes(self.conf, self.args.rule)
# Check each current rule for changes
for rule_file, hash_value in self.rule_hashes.iteritems():
if rule_file not in rule_hashes:
# Rule file was deleted
logging.info('Rule file %s not found, stopping rule execution' % (rule_file))
self.rules = [rule for rule in self.rules if rule['rule_file'] != rule_file]
continue
if hash_value != rule_hashes[rule_file]:
# Rule file was changed, reload rule
try:
new_rule = load_configuration(rule_file)
except EAException as e:
self.handle_error('Could not load rule %s: %s' % (rule_file, e))
continue
logging.info("Reloading configuration for rule %s" % (rule_file))
# Re-enable if rule had been disabled
for disabled_rule in self.disabled_rules:
if disabled_rule['name'] == new_rule['name']:
self.rules.append(disabled_rule)
self.disabled_rules.remove(disabled_rule)
break
# Initialize the rule that matches rule_file
self.rules = [rule if rule['rule_file'] != rule_file else self.init_rule(new_rule, False) for rule in self.rules]
# Load new rules
if not self.args.rule:
for rule_file in set(rule_hashes.keys()) - set(self.rule_hashes.keys()):
try:
new_rule = load_configuration(rule_file)
if new_rule['name'] in [rule['name'] for rule in self.rules]:
raise EAException("A rule with the name %s already exists" % (new_rule['name']))
except EAException as e:
self.handle_error('Could not load rule %s: %s' % (rule_file, e))
continue
logging.info('Loaded new rule %s' % (rule_file))
self.rules.append(self.init_rule(new_rule))
self.rule_hashes = rule_hashes
def start(self):
""" Periodically go through each rule and run it """
if self.starttime:
try:
self.starttime = ts_to_dt(self.starttime)
except (TypeError, ValueError):
self.handle_error("%s is not a valid ISO8601 timestamp (YYYY-MM-DDTHH:MM:SS+XX:00)" % (self.starttime))
exit(1)
self.running = True
while self.running:
next_run = datetime.datetime.utcnow() + self.run_every
self.run_all_rules()
if next_run < datetime.datetime.utcnow():
continue
# Wait before querying again
sleep_duration = (next_run - datetime.datetime.utcnow()).seconds
self.sleep_for(sleep_duration)
def run_all_rules(self):
""" Run each rule one time """
# If writeback_es errored, it's disabled until the next query cycle
if not self.writeback_es:
self.writeback_es = self.new_elasticsearch(self.es_conn_config)
self.send_pending_alerts()
next_run = datetime.datetime.utcnow() + self.run_every
for rule in self.rules:
# Set endtime based on the rule's delay
delay = rule.get('query_delay')
if hasattr(self.args, 'end') and self.args.end:
endtime = ts_to_dt(self.args.end)
elif delay:
endtime = ts_now() - delay
else:
endtime = ts_now()
try:
num_matches = self.run_rule(rule, endtime, self.starttime)
except EAException as e:
self.handle_error("Error running rule %s: %s" % (rule['name'], e), {'rule': rule['name']})
except Exception as e:
self.handle_uncaught_exception(e, rule)
else:
old_starttime = pretty_ts(rule.get('original_starttime'), rule.get('use_local_time'))
logging.info("Ran %s from %s to %s: %s query hits, %s matches,"
" %s alerts sent" % (rule['name'], old_starttime, pretty_ts(endtime, rule.get('use_local_time')),
self.num_hits, num_matches, self.alerts_sent))
self.alerts_sent = 0
self.remove_old_events(rule)
if next_run < datetime.datetime.utcnow():
# We were processing for longer than our refresh interval
# This can happen if --start was specified with a large time period
# or if we are running too slow to process events in real time.
logging.warning("Querying from %s to %s took longer than %s!" % (old_starttime, endtime, self.run_every))
# Only force starttime once
self.starttime = None
if not self.args.pin_rules:
self.load_rule_changes()
def stop(self):
""" Stop an elastalert runner that's been started """
self.running = False
def sleep_for(self, duration):
""" Sleep for a set duration """
logging.info("Sleeping for %s seconds" % (duration))
time.sleep(duration)
def generate_kibana4_db(self, rule, match):
''' Creates a link for a kibana4 dashboard which has time set to the match. '''
db_name = rule.get('use_kibana4_dashboard')
start = ts_add(match[rule['timestamp_field']], -rule.get('kibana4_start_timedelta', rule.get('timeframe', datetime.timedelta(minutes=10))))
end = ts_add(match[rule['timestamp_field']], rule.get('kibana4_end_timedelta', rule.get('timeframe', datetime.timedelta(minutes=10))))
link = kibana.kibana4_dashboard_link(db_name, start, end)
return link
def generate_kibana_db(self, rule, match):
''' Uses a template dashboard to upload a temp dashboard showing the match.
Returns the url to the dashboard. '''
db = copy.deepcopy(kibana.dashboard_temp)
# Set filters
for filter in rule['filter']:
if filter:
kibana.add_filter(db, filter)
kibana.set_included_fields(db, rule['include'])
# Set index
index = self.get_index(rule)
kibana.set_index_name(db, index)
return self.upload_dashboard(db, rule, match)
def upload_dashboard(self, db, rule, match):
''' Uploads a dashboard schema to the kibana-int elasticsearch index associated with rule.
Returns the url to the dashboard. '''
# Set time range
start = ts_add(match[rule['timestamp_field']], -rule.get('timeframe', datetime.timedelta(minutes=10)))
end = ts_add(match[rule['timestamp_field']], datetime.timedelta(minutes=10))
kibana.set_time(db, start, end)
# Set dashboard name
db_name = 'ElastAlert - %s - %s' % (rule['name'], end)
kibana.set_name(db, db_name)
# Add filter for query_key value
if 'query_key' in rule:
for qk in rule.get('compound_query_key', [rule['query_key']]):
if qk in match:
term = {'term': {qk: match[qk]}}
kibana.add_filter(db, term)
# Convert to json
db_js = json.dumps(db)
db_body = {'user': 'guest',
'group': 'guest',
'title': db_name,
'dashboard': db_js}
# Upload
rule_es_conn_config = self.build_es_conn_config(rule)
es = self.new_elasticsearch(rule_es_conn_config)
res = es.create(index='kibana-int',
doc_type='temp',
body=db_body)
# Return dashboard URL
kibana_url = rule.get('kibana_url')
if not kibana_url:
kibana_url = 'http://%s:%s/_plugin/kibana/' % (rule['es_host'],
rule['es_port'])
return kibana_url + '#/dashboard/temp/%s' % (res['_id'])
def get_dashboard(self, rule, db_name):
""" Download dashboard which matches use_kibana_dashboard from elasticsearch. """
rule_es_conn_config = self.build_es_conn_config(rule)
es = self.new_elasticsearch(rule_es_conn_config)
if not db_name:
raise EAException("use_kibana_dashboard undefined")
query = {'query': {'term': {'_id': db_name}}}
try:
res = es.search(index='kibana-int', doc_type='dashboard', body=query, _source_include=['dashboard'])
except ElasticsearchException as e:
raise EAException("Error querying for dashboard: %s" % (e))
if res['hits']['hits']:
return json.loads(res['hits']['hits'][0]['_source']['dashboard'])
else:
raise EAException("Could not find dashboard named %s" % (db_name))
def use_kibana_link(self, rule, match):
""" Uploads an existing dashboard as a temp dashboard modified for match time.
Returns the url to the dashboard. """
# Download or get cached dashboard
dashboard = rule.get('dashboard_schema')
if not dashboard:
db_name = rule.get('use_kibana_dashboard')
dashboard = self.get_dashboard(rule, db_name)
if dashboard:
rule['dashboard_schema'] = dashboard
else:
return None
dashboard = copy.deepcopy(dashboard)
return self.upload_dashboard(dashboard, rule, match)
def filters_from_kibana(self, rule, db_name):
""" Downloads a dashboard from kibana and returns corresponding filters, None on error. """
try:
db = rule.get('dashboard_schema')
if not db:
db = self.get_dashboard(rule, db_name)
filters = kibana.filters_from_dashboard(db)
except EAException:
return None
return filters
def alert(self, matches, rule, alert_time=None):
""" Wraps alerting, kibana linking and enhancements in an exception handler """
try:
            return self.send_alert(matches, rule, alert_time=alert_time)
except Exception as e:
self.handle_uncaught_exception(e, rule)
def send_alert(self, matches, rule, alert_time=None):
""" Send out an alert.
:param matches: A list of matches.
:param rule: A rule configuration.
"""
if alert_time is None:
alert_time = ts_now()
# Compute top count keys
if rule.get('top_count_keys'):
for match in matches:
if 'query_key' in rule and rule['query_key'] in match:
qk = match[rule['query_key']]
else:
qk = None
start = ts_to_dt(match[rule['timestamp_field']]) - rule.get('timeframe', datetime.timedelta(minutes=10))
end = ts_to_dt(match[rule['timestamp_field']]) + datetime.timedelta(minutes=10)
keys = rule.get('top_count_keys')
counts = self.get_top_counts(rule, start, end, keys, qk=qk)
match.update(counts)
# Generate a kibana3 dashboard for the first match
if rule.get('generate_kibana_link') or rule.get('use_kibana_dashboard'):
try:
if rule.get('generate_kibana_link'):
kb_link = self.generate_kibana_db(rule, matches[0])
else:
kb_link = self.use_kibana_link(rule, matches[0])
except EAException as e:
self.handle_error("Could not generate kibana dash for %s match: %s" % (rule['name'], e))
else:
if kb_link:
matches[0]['kibana_link'] = kb_link
if rule.get('use_kibana4_dashboard'):
kb_link = self.generate_kibana4_db(rule, matches[0])
if kb_link:
matches[0]['kibana_link'] = kb_link
for enhancement in rule['match_enhancements']:
valid_matches = []
for match in matches:
try:
enhancement.process(match)
valid_matches.append(match)
except DropMatchException as e:
pass
except EAException as e:
self.handle_error("Error running match enhancement: %s" % (e), {'rule': rule['name']})
matches = valid_matches
if not matches:
return
# Don't send real alerts in debug mode
if self.debug:
alerter = DebugAlerter(rule)
alerter.alert(matches)
return
# Run the alerts
alert_sent = False
alert_exception = None
alert_pipeline = {}
for alert in rule['alert']:
# Alert.pipeline is a single object shared between every alerter
# This allows alerters to pass objects and data between themselves
alert.pipeline = alert_pipeline
try:
alert.alert(matches)
except EAException as e:
self.handle_error('Error while running alert %s: %s' % (alert.get_info()['type'], e), {'rule': rule['name']})
alert_exception = str(e)
else:
self.alerts_sent += 1
alert_sent = True
# Write the alert(s) to ES
agg_id = None
for match in matches:
alert_body = self.get_alert_body(match, rule, alert_sent, alert_time, alert_exception)
# Set all matches to aggregate together
if agg_id:
alert_body['aggregate_id'] = agg_id
res = self.writeback('elastalert', alert_body)
if res and not agg_id:
agg_id = res['_id']
def get_alert_body(self, match, rule, alert_sent, alert_time, alert_exception=None):
body = {'match_body': match}
body['rule_name'] = rule['name']
# TODO record info about multiple alerts
body['alert_info'] = rule['alert'][0].get_info()
body['alert_sent'] = alert_sent
body['alert_time'] = alert_time
# If the alert failed to send, record the exception
if not alert_sent:
body['alert_exception'] = alert_exception
return body
def writeback(self, doc_type, body):
# Convert any datetime objects to timestamps
for key in body.keys():
if isinstance(body[key], datetime.datetime):
body[key] = dt_to_ts(body[key])
if self.debug:
logging.info("Skipping writing to ES: %s" % (body))
return None
if '@timestamp' not in body:
body['@timestamp'] = dt_to_ts(ts_now())
if self.writeback_es:
try:
res = self.writeback_es.create(index=self.writeback_index,
doc_type=doc_type, body=body)
return res
except ElasticsearchException as e:
logging.exception("Error writing alert info to elasticsearch: %s" % (e))
self.writeback_es = None
return None
def find_recent_pending_alerts(self, time_limit):
""" Queries writeback_es to find alerts that did not send
and are newer than time_limit """
query = {'query': {'query_string': {'query': 'alert_sent:false'}},
'filter': {'range': {'alert_time': {'from': dt_to_ts(ts_now() - time_limit),
'to': dt_to_ts(ts_now())}}}}
if self.writeback_es:
try:
res = self.writeback_es.search(index=self.writeback_index,
doc_type='elastalert',
body=query,
size=1000)
if res['hits']['hits']:
return res['hits']['hits']
except:
pass
return []
def send_pending_alerts(self):
pending_alerts = self.find_recent_pending_alerts(self.alert_time_limit)
for alert in pending_alerts:
_id = alert['_id']
alert = alert['_source']
try:
rule_name = alert.pop('rule_name')
alert_time = alert.pop('alert_time')
match_body = alert.pop('match_body')
except KeyError:
# Malformed alert, drop it
continue
agg_id = alert.get('aggregate_id', None)
if agg_id:
# Aggregated alerts will be taken care of by get_aggregated_matches
continue
# Find original rule
for rule in self.rules:
if rule['name'] == rule_name:
break
else:
# Original rule is missing, drop alert
continue
# Retry the alert unless it's a future alert
if ts_now() > ts_to_dt(alert_time):
aggregated_matches = self.get_aggregated_matches(_id)
if aggregated_matches:
matches = [match_body] + [agg_match['match_body'] for agg_match in aggregated_matches]
self.alert(matches, rule, alert_time=alert_time)
rule['current_aggregate_id'] = None
else:
self.alert([match_body], rule, alert_time=alert_time)
# Delete it from the index
try:
self.writeback_es.delete(index=self.writeback_index,
doc_type='elastalert',
id=_id)
except:
self.handle_error("Failed to delete alert %s at %s" % (_id, alert_time))
# Send in memory aggregated alerts
for rule in self.rules:
if rule['agg_matches']:
if ts_now() > rule['aggregate_alert_time']:
self.alert(rule['agg_matches'], rule)
rule['agg_matches'] = []
def get_aggregated_matches(self, _id):
""" Removes and returns all matches from writeback_es that have aggregate_id == _id """
query = {'query': {'query_string': {'query': 'aggregate_id:%s' % (_id)}}}
matches = []
if self.writeback_es:
try:
res = self.writeback_es.search(index=self.writeback_index,
doc_type='elastalert',
body=query)
for match in res['hits']['hits']:
matches.append(match['_source'])
self.writeback_es.delete(index=self.writeback_index,
doc_type='elastalert',
id=match['_id'])
except (KeyError, ElasticsearchException) as e:
self.handle_error("Error fetching aggregated matches: %s" % (e), {'id': _id})
return matches
def add_aggregated_alert(self, match, rule):
""" Save a match as a pending aggregate alert to elasticsearch. """
if not rule['current_aggregate_id'] or rule['aggregate_alert_time'] < ts_to_dt(match[rule['timestamp_field']]):
# First match, set alert_time
match_time = ts_to_dt(match[rule['timestamp_field']])
alert_time = match_time + rule['aggregation']
rule['aggregate_alert_time'] = alert_time
agg_id = None
else:
# Already pending aggregation, use existing alert_time
alert_time = rule['aggregate_alert_time']
agg_id = rule['current_aggregate_id']
logging.info('Adding alert for %s to aggregation, next alert at %s' % (rule['name'], alert_time))
alert_body = self.get_alert_body(match, rule, False, alert_time)
if agg_id:
alert_body['aggregate_id'] = agg_id
res = self.writeback('elastalert', alert_body)
# If new aggregation, save _id
if res and not agg_id:
rule['current_aggregate_id'] = res['_id']
# Couldn't write the match to ES, save it in memory for now
if not res:
rule['agg_matches'].append(match)
return res
def silence(self):
""" Silence an alert for a period of time. --silence and --rule must be passed as args. """
if self.debug:
logging.error('--silence not compatible with --debug')
exit(1)
if not self.args.rule:
logging.error('--silence must be used with --rule')
exit(1)
# With --rule, self.rules will only contain that specific rule
rule_name = self.rules[0]['name']
try:
unit, num = self.args.silence.split('=')
silence_time = datetime.timedelta(**{unit: int(num)})
# Double conversion to add tzinfo
silence_ts = ts_to_dt(dt_to_ts(silence_time + datetime.datetime.utcnow()))
except (ValueError, TypeError):
logging.error('%s is not a valid time period' % (self.args.silence))
exit(1)
if not self.set_realert(rule_name, silence_ts, 0):
logging.error('Failed to save silence command to elasticsearch')
exit(1)
logging.info('Success. %s will be silenced until %s' % (rule_name, silence_ts))
def set_realert(self, rule_name, timestamp, exponent):
""" Write a silence to elasticsearch for rule_name until timestamp. """
body = {'exponent': exponent,
'rule_name': rule_name,
'@timestamp': ts_now(),
'until': timestamp}
self.silence_cache[rule_name] = (timestamp, exponent)
return self.writeback('silence', body)
def is_silenced(self, rule_name):
""" Checks if rule_name is currently silenced. Returns false on exception. """
if rule_name in self.silence_cache:
if ts_now() < self.silence_cache[rule_name][0]:
return True
else:
return False
if self.debug:
return False
query = {'filter': {'term': {'rule_name': rule_name}},
'sort': {'until': {'order': 'desc'}}}
if self.writeback_es:
try:
res = self.writeback_es.search(index=self.writeback_index, doc_type='silence',
size=1, body=query, _source_include=['until', 'exponent'])
except ElasticsearchException as e:
self.handle_error("Error while querying for alert silence status: %s" % (e), {'rule': rule_name})
return False
if res['hits']['hits']:
until_ts = res['hits']['hits'][0]['_source']['until']
exponent = res['hits']['hits'][0]['_source'].get('exponent', 0)
self.silence_cache[rule_name] = (ts_to_dt(until_ts), exponent)
if ts_now() < ts_to_dt(until_ts):
return True
return False
def handle_error(self, message, data=None):
''' Logs message at error level and writes message, data and traceback to Elasticsearch. '''
if not self.writeback_es:
self.writeback_es = self.new_elasticsearch(self.es_conn_config)
logging.error(message)
body = {'message': message}
tb = traceback.format_exc()
body['traceback'] = tb.strip().split('\n')
if data:
body['data'] = data
self.writeback('elastalert_error', body)
def handle_uncaught_exception(self, exception, rule):
""" Disables a rule and sends a notifcation. """
self.handle_error('Uncaught exception running rule %s: %s' % (rule['name'], exception), {'rule': rule['name']})
if self.disable_rules_on_error:
self.rules = [running_rule for running_rule in self.rules if running_rule['name'] != rule['name']]
self.disabled_rules.append(rule)
if self.notify_email:
self.send_notification_email(exception=exception, rule=rule)
def send_notification_email(self, text='', exception=None, rule=None, subject=None):
email_body = text
if exception and rule:
if not subject:
subject = 'Uncaught exception in ElastAlert - %s' % (rule['name'])
email_body += '\n\n'
email_body += 'The rule %s has raised an uncaught exception.\n\n' % (rule['name'])
if self.disable_rules_on_error:
modified = ' or if the rule config file has been modified' if not self.args.pin_rules else ''
email_body += 'It has been disabled and will be re-enabled when ElastAlert restarts%s.\n\n' % (modified)
tb = traceback.format_exc()
email_body += tb
if isinstance(self.notify_email, basestring):
self.notify_email = [self.notify_email]
email = MIMEText(email_body)
email['Subject'] = subject if subject else 'ElastAlert notification'
email['To'] = ', '.join(self.notify_email)
email['From'] = self.from_addr
email['Reply-To'] = self.conf.get('email_reply_to', email['To'])
try:
smtp = SMTP(self.smtp_host)
smtp.sendmail(self.from_addr, self.notify_email, email.as_string())
except (SMTPException, error) as e:
self.handle_error('Error connecting to SMTP host: %s' % (e), {'email_body': email_body})
def get_top_counts(self, rule, starttime, endtime, keys, number=None, qk=None):
""" Counts the number of events for each unique value for each key field.
Returns a dictionary with top_events_<key> mapped to the top 5 counts for each key. """
all_counts = {}
if not number:
number = rule.get('top_count_number', 5)
for key in keys:
index = self.get_index(rule, starttime, endtime)
buckets = self.get_hits_terms(rule, starttime, endtime, index, key, qk, number).values()[0]
# get_hits_terms adds to num_hits, but we don't want to count these
self.num_hits -= len(buckets)
terms = {}
for bucket in buckets:
terms[bucket['key']] = bucket['doc_count']
counts = terms.items()
counts.sort(key=lambda x: x[1], reverse=True)
# Save a dict with the top 5 events by key
all_counts['top_events_%s' % (key)] = dict(counts[:number])
return all_counts
def next_alert_time(self, rule, name, timestamp):
""" Calculate an 'until' time and exponent based on how much past the last 'until' we are. """
if name in self.silence_cache:
last_until, exponent = self.silence_cache[name]
else:
# If this isn't cached, this is the first alert or writeback_es is down, normal realert
return timestamp + rule['realert'], 0
if not rule.get('exponential_realert'):
return timestamp + rule['realert'], 0
diff = seconds(timestamp - last_until)
# Increase exponent if we've alerted recently
if diff < seconds(rule['realert']) * 2 ** exponent:
exponent += 1
else:
# Continue decreasing exponent the longer it's been since the last alert
while diff > seconds(rule['realert']) * 2 ** exponent and exponent > 0:
diff -= seconds(rule['realert']) * 2 ** exponent
exponent -= 1
wait = datetime.timedelta(seconds=seconds(rule['realert']) * 2 ** exponent)
if wait >= rule['exponential_realert']:
return timestamp + rule['exponential_realert'], exponent - 1
return timestamp + wait, exponent
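    # Worked example for next_alert_time (illustrative): with realert set to
    # 5 minutes and exponential_realert to 1 hour, a rule that keeps matching
    # as soon as each silence expires is silenced for 5, 10, 20 and then 40
    # minutes, and is capped at 60 minutes once realert * 2**exponent would
    # exceed exponential_realert. Long quiet periods walk the exponent back
    # down again.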
if __name__ == '__main__':
client = ElastAlerter(sys.argv[1:])
if not client.args.silence:
client.start()
| apache-2.0 | -5,955,821,547,859,423,000 | 43.828689 | 242 | 0.567735 | false |
legnaleurc/wcpan.worker | tests/test_queue.py | 1 | 1486 | import asyncio
import unittest as ut
import async_timeout as at
import wcpan.worker as ww
from . import util as u
class TestAsyncQueue(ut.TestCase):
@ww.sync
async def testImmediatelyShutdown(self):
with at.timeout(0.1):
async with ww.AsyncQueue(8) as aq:
pass
@ww.sync
async def testPost(self):
async with ww.AsyncQueue(1) as aq:
fn = u.NonBlocker()
rc = u.ResultCollector()
aq.post(fn)
aq.post(rc)
await rc.wait()
self.assertEqual(fn.call_count, 1)
@ww.sync
async def testPostParallel(self):
async with ww.AsyncQueue(2) as aq:
rc = u.ResultCollector()
async def wait_one_second():
await asyncio.sleep(0.25)
rc.add(42)
aq.post(wait_one_second)
aq.post(wait_one_second)
with at.timeout(0.3):
aq.post(rc)
await rc.wait()
self.assertEqual(rc.values, [42, 42])
@ww.sync
async def testFlush(self):
async with ww.AsyncQueue(1) as aq:
fn1 = u.NonBlocker(p=2)
fn2 = u.NonBlocker(p=1)
rc = u.ResultCollector()
aq.post(fn1)
aq.post(fn2)
aq.flush(lambda t: t.priority == 2)
aq.post(rc)
await rc.wait()
self.assertEqual(fn1.call_count, 0)
self.assertEqual(fn2.call_count, 1)
| mit | 6,828,595,760,960,397,000 | 22.21875 | 47 | 0.530283 | false |
jesseward/musicdiscovery-assist | tests/test_liblastfm.py | 1 | 1299 | import json
import pytest
from mock import MagicMock
from discovery_assist.liblastfm import LastFM
def test_clean_string():
fm = LastFM('no api key')
assert fm._clean_string('<esi>sys64738</esi>') == 'sys64738'
def test_get_similar_tracks():
with open('lastfm-track.getSimilar.json') as fh:
data = json.load(fh)
fm = LastFM('no api key')
fm._fetch = MagicMock(return_value=data)
assert len(fm.get_similar_tracks('artist', 'track')) == 99
def test_get_artist_info():
with open('lastfm-artist.getInfo.json') as fh:
data = json.load(fh)
fm = LastFM('fake key for testing')
fm._fetch = MagicMock(return_value=data)
assert fm.get_artist_info('mock artist') == fm._clean_string(data['artist']['bio']['content'])
def test_get_similar_artists():
with open('lastfm-artist.getSimilar.json') as fh:
data = json.load(fh)
fm = LastFM('fake key for testing')
fm._fetch = MagicMock(return_value=data)
assert len(fm.get_similar_artists('artist')) == 100
def test_get_artist_top_tracks():
with open('lastfm-artist.getTopTracks.json') as fh:
data = json.load(fh)
fm = LastFM('fake key for testing')
fm._fetch = MagicMock(return_value=data)
assert len(fm.get_artist_top_tracks('artist')) == 50
| mit | 4,994,567,215,904,635,000 | 24.470588 | 98 | 0.655119 | false |
fhdk/pacman-mirrors | pacman_mirrors/functions/httpFn.py | 1 | 7641 | #!/usr/bin/env python
#
# This file is part of pacman-mirrors.
#
# pacman-mirrors is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pacman-mirrors is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pacman-mirrors. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Frede Hundewadt <echo ZmhAbWFuamFyby5vcmcK | base64 -d>
"""Manjaro-Mirrors HTTP Functions"""
import collections
import filecmp
import json
import os
import requests
import ssl
import time
import urllib.request
import urllib.parse
from http.client import HTTPException
from os import system as system_call
from socket import timeout
from urllib.error import URLError
from pacman_mirrors import __version__
from pacman_mirrors.config import configuration as conf
from pacman_mirrors.constants import timezones
from pacman_mirrors.constants import txt
from pacman_mirrors.functions import fileFn
from pacman_mirrors.functions import jsonFn
from pacman_mirrors.functions import util
USER_AGENT = {"User-Agent": "{}{}".format(conf.USER_AGENT, __version__)}
def get_url_last_modifed(url: str) -> str:
x = requests.head(url)
return x.headers["last-modified"]
def download_mirrors(config: object) -> tuple:
"""Retrieve mirrors from manjaro.org
:param config:
:returns: tuple with bool for mirrors.json and status.json
:rtype: tuple
"""
fetchmirrors = False
fetchstatus = False
try:
# mirrors.json
req = urllib.request.Request(url=config["url_mirrors_json"],
headers=USER_AGENT)
with urllib.request.urlopen(req) as response:
mirrorlist = json.loads(response.read().decode("utf8"),
object_pairs_hook=collections.OrderedDict)
fetchmirrors = True
tempfile = config["work_dir"] + "/.temp.file"
jsonFn.json_dump_file(mirrorlist, tempfile)
filecmp.clear_cache()
if fileFn.check_file(conf.USR_DIR, folder=True):
if not fileFn.check_file(config["mirror_file"]):
jsonFn.json_dump_file(mirrorlist, config["mirror_file"])
elif not filecmp.cmp(tempfile, config["mirror_file"]):
jsonFn.json_dump_file(mirrorlist, config["mirror_file"])
os.remove(tempfile)
except (HTTPException, json.JSONDecodeError, URLError):
pass
try:
# status.json
req = urllib.request.Request(url=config["url_status_json"],
headers=USER_AGENT)
with urllib.request.urlopen(req) as response:
statuslist = json.loads(
response.read().decode("utf8"),
object_pairs_hook=collections.OrderedDict)
fetchstatus = True
jsonFn.write_json_file(statuslist, config["status_file"])
except (HTTPException, json.JSONDecodeError, URLError):
pass
# result
return fetchmirrors, fetchstatus
def get_ip_country() -> str:
"""
Get the user country from connection IP (might be VPN who knows)
:return: country name
"""
try:
return requests.get("https://get.geojs.io/v1/ip/country/full").text
    except (requests.RequestException, URLError, HTTPException):
return ""
def get_mirror_response(url: str, config: object, tty: bool = False, maxwait: int = 2,
count: int = 1, quiet: bool = False, ssl_verify: bool = True) -> float:
"""Query mirror by downloading a file and measuring the time taken
:param config:
:param ssl_verify:
:param tty:
:param url:
:param maxwait:
:param count:
:param quiet:
    :returns: always returns a float value with the response time
"""
response_time = txt.SERVER_RES # prepare default return value
probe_stop = None
message = ""
context = ssl.create_default_context()
arch = "x86_64"
if config["x32"]:
arch = "i686"
probe_url = f"{url}{config['branch']}/core/{arch}/{config['test_file']}"
if not ssl_verify:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
req = urllib.request.Request(url=probe_url, headers=USER_AGENT)
probe_start = time.time()
# noinspection PyBroadException
try:
for _ in range(count):
response = urllib.request.urlopen(req, timeout=maxwait, context=context)
_ = response.read()
probe_stop = time.time()
except URLError as err:
if hasattr(err, "reason"):
message = f"{err.reason} '{url}'"
elif hasattr(err, "code"):
message = f"{err.reason} '{url}'"
except timeout:
message = f"{txt.TIMEOUT} '{url}'"
except HTTPException:
message = f"{txt.HTTP_EXCEPTION} '{url}'"
except ssl.CertificateError:
message = f"{ssl.CertificateError} '{url}'"
except Exception as e:
message = f"{e} '{url}'"
if message and not quiet:
util.msg(message=message, urgency=txt.ERR_CLR, tty=tty, newline=True)
if probe_stop:
response_time = round((probe_stop - probe_start), 3)
return response_time
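# Illustrative sketch (not part of the original module); the mirror URL and
# config values are assumptions for the example. It times a single download
# of the configured test file from one mirror.
def _example_probe_mirror():
    sample_config = {"branch": "stable", "x32": False, "test_file": "core.db"}
    # Returns a float number of seconds, or the txt.SERVER_RES sentinel on failure.
    return get_mirror_response("https://mirror.example.org/manjaro/",
                               sample_config, quiet=True)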
def check_internet_connection(tty: bool = False, maxwait: int = 2) -> bool:
"""Check for internet connection
:param maxwait:
:param tty:
"""
resp = None
hosts = conf.INET_CONN_CHECK_URLS
for host in hosts:
# noinspection PyBroadException
try:
resp = urllib.request.urlopen(host, timeout=maxwait)
break
except Exception as e:
util.msg(f"{host} '{e}'", urgency=txt.WRN_CLR, tty=tty)
return bool(resp)
def ping_host(host: str, tty: bool = False, count: int = 1) -> bool:
"""Check a hosts availability
:param host:
:param count:
:param tty:
:rtype: boolean
"""
util.msg(f"ping {host} x {count}", urgency=txt.INF_CLR, tty=tty)
return system_call("ping -c{} {} > /dev/null".format(count, host)) == 0
def download_mirror_pool(config: object, tty: bool = False, quiet: bool = False) -> tuple:
"""Download updates from repo.manjaro.org
:param config:
:param quiet:
:param tty:
:returns: tuple with True/False for mirrors.json and status.json
:rtype: tuple
"""
result = None
connected = check_internet_connection(tty=tty)
if connected:
if not quiet:
util.msg(message=f"{txt.DOWNLOADING_MIRROR_FILE} {txt.REPO_SERVER}",
urgency=txt.INF_CLR,
tty=tty)
result = download_mirrors(config)
else:
if not fileFn.check_file(config["status_file"]):
if not quiet:
util.msg(message=f"{txt.MIRROR_FILE} {config['status_file']} {txt.IS_MISSING}",
urgency=txt.WRN_CLR,
tty=tty)
util.msg(message=f"{txt.FALLING_BACK} {conf.MIRROR_FILE}",
urgency=txt.WRN_CLR,
tty=tty)
result = (True, False)
if not fileFn.check_file(config["mirror_file"]):
if not quiet:
util.msg(message=f"{txt.HOUSTON}",
urgency=txt.HOUSTON,
tty=tty)
result = (False, False)
return result
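# Illustrative usage (config keys are assumptions taken from the calls above):
#     fetched_mirrors, fetched_status = download_mirror_pool(config) or (False, False)
# A (True, True) result means both mirrors.json and status.json were refreshed
# from repo.manjaro.org; without a connection the function warns and the caller
# falls back to the bundled mirror file.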
| gpl-3.0 | 6,548,004,164,943,366,000 | 34.050459 | 95 | 0.621646 | false |
frjanibo/Stream | resources/site-packages/stream/scrapers/espoiler.py | 1 | 5606 | from stream import plugin
from stream.scrapers import scraper
from stream.ga import tracked
from stream.caching import cached_route
from stream.utils import ensure_fanart
from stream.library import library_context
from stream.utils import url_get
from bs4 import BeautifulSoup
import re
import requests
import json
BASE_URL = "http://www.espoilertv.com/"
HEADERS = {
"Referer": BASE_URL,
}
payload = {
'mail':plugin.get_setting("espoiler_user"),
'pass':plugin.get_setting("espoiler_pass")
}
s = requests.Session()
p = s.post(BASE_URL+'serv/asincrono/logOn.php', data=payload)
print p.content
@plugin.route("/espoiler/marcar/<idEpisodio>/<accion>")
def espoiler_marcar_visto(idEpisodio, accion):
import xbmc
r = s.post(BASE_URL+"api/v1/mitv", {'accion':accion, 'idEpisodio':idEpisodio} )
print r.content
xbmc.executebuiltin('Container.Refresh')
@scraper("Espoiler TV")
@plugin.route("/espoiler")
def espoiler_index():
print "espoilerTV!"
plugin.set_content("episodes")
yield {
"label": ">> Calendario",
"path": plugin.url_for("espoiler_calendario", dia=0),
'is_playable': False
}
r = s.get(BASE_URL + 'api/v1/mitv?grupo=porVer')
mitv = json.loads(r.content)
for serie in mitv['series']:
print serie['titulo']
print plugin.url_for("espoiler_ver_fuentes", capitulo=serie['idEpisodio'])
item = {}
item['label'] = '%s (S%sE%s)' % (serie['titulo'], serie['temporada'].zfill(2), serie['episodio'].zfill(2))
item['path'] = plugin.url_for("espoiler_ver_serie", titulo=serie['titBase'])
item['is_playable'] = False
item['replace_context_menu'] = True
yield item
@plugin.route("/espoiler/serie/<titulo>")
def espoiler_ver_serie(titulo):
print "espoiler_ver_serie %s" % titulo
plugin.set_content("episodes")
html_data = s.get( BASE_URL+"ficha/"+titulo )
soup = BeautifulSoup(html_data.content, "html5lib")
for node in soup.findAll('div',attrs={'class': re.compile(r".*\bepisodio\b.*")}):
print node
if node.div.input.has_attr('value'):
#print node.div.input['value']
divTitulo = node.findAll('div',attrs={'class': re.compile(r".*\btitulo\b.*")})[0].get_text()
visto = node.findAll('button',attrs={'class': re.compile(r".*\bvisto\b.*")})[0]['data-visto']
playcount = 0 if visto=='no' else 2
print visto + " " + str(playcount)
contextMenu = ("Marcar como visto", "XBMC.RunPlugin(%s)" % plugin.url_for("espoiler_marcar_visto", idEpisodio=node.div.input['value'], accion='visto' ))
if playcount > 0:
contextMenu = ("Marcar como NO visto", "XBMC.RunPlugin(%s)" % plugin.url_for("espoiler_marcar_visto", idEpisodio=node.div.input['value'], accion='noVisto' ))
yield {
'label': '%s - %s' % (node['id'],divTitulo),
'path': plugin.url_for("espoiler_ver_fuentes", capitulo=node.div.input['value']),
'is_playable': False,
'context_menu': [ contextMenu ],
'info':{
"episode": "la madre que lo pario",
'playcount': playcount
}
}
@plugin.route("/espoiler/fuentes/<capitulo>")
def espoiler_ver_fuentes(capitulo):
r = s.get(BASE_URL+"serv/asincrono/enlaces.php?id="+capitulo)
info = json.loads(r.content)
"""
yield {
'label': '%s (S%sE%s)' % (info['titSerie'], info['temporada'].zfill(2), info['episodio'].zfill(2)),
'path': plugin.url_for("espoiler_ver_fuentes", capitulo ),
'is_playable':False
}
"""
for fuente in info['vid']:
yield {
'label': '%s (%s,%s)' % (fuente['dominio'], fuente['descargas'], fuente['reportes']),
'path': plugin.url_for("espoiler_play", url=fuente['url']),
'is_playable': False
}
@plugin.route("/espoiler/play/<url>")
def espoiler_play( url ):
print "espoiler_play %s" % url
html_data = url_get( url, headers=HEADERS)
soup = BeautifulSoup(html_data, "html5lib")
def filter_Magnet(el):
return el.has_attr('href') and 'magnet:' in el['href']
nodes = soup.findAll(filter_Magnet)
for node in nodes:
yield {
'label': '%s' % node['href'],
'path': plugin.url_for("play", uri=node['href']),
'is_playable': True
}
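# Illustrative note (hypothetical HTML): for a page containing an anchor such
# as <a href="magnet:?xt=urn:btih:...">descargar</a>, filter_Magnet() matches
# it and each magnet URI is yielded as an item routed to the plugin's "play"
# handler.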
@plugin.route("/espoiler/calendario/<dia>")
@cached_route(ttl=3000, content_type="episodes")
def espoiler_calendario(dia=0):
from datetime import date,timedelta
dia = int(dia)
hoy = date.today()
un_dia = timedelta(days=1)
hoy = hoy + un_dia*dia
yield {
'label': 'espoilerTV Inicio' ,
'path': plugin.url_for("espoiler_index"),
'is_playable': False
}
yield {
'label': '<<< ' + (hoy-un_dia).isoformat() ,
'path': plugin.url_for("espoiler_calendario", dia=dia-1),
'is_playable': False
}
r = s.get( BASE_URL+"api/v1/calendario?fecha="+hoy.isoformat() )
dayCalendar = json.loads(r.content)
for serie in dayCalendar['series']:
yield {
'label': '%s (S%sE%s)' % (serie['titulo'], serie['temporada'].zfill(2), serie['episodio'].zfill(2)),
'path': plugin.url_for("espoiler_ver_serie", titulo=serie['titBase']),
'is_playable': False
}
yield {
'label': '>>> '+(hoy+un_dia).isoformat() ,
'path': plugin.url_for("espoiler_calendario", dia=dia+1),
'is_playable': False
}
| gpl-3.0 | 6,394,528,551,549,263,000 | 34.257862 | 173 | 0.588833 | false |
vojtechtrefny/python-meh | meh/safe_string.py | 1 | 2372 | #
# Copyright (C) 2013 Red Hat, Inc.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vratislav Podzimek <[email protected]>
#
#
"""
This module provides a SafeStr class.
@see: SafeStr
"""

import sys

PY = int(sys.version.split('.')[0])
class SafeStr(str):
"""
String class that has a modified __add__ method so that ascii strings,
binary data represented as a byte string and unicode objects can be
safely appended to it (not causing traceback). BINARY DATA IS OMITTED.
"""
def __add__(self, other):
if PY > 2:
return SafeStr(str.__add__(self, str(other)))
if not (isinstance(other, str) or isinstance(other, unicode)):
if hasattr(other, "__str__"):
other = other.__str__()
else:
other = "OMITTED OBJECT WITHOUT __str__ METHOD"
if isinstance(other, unicode):
ret = SafeStr(str.__add__(self, other.encode("utf-8")))
else:
try:
# try to decode which doesn't cause traceback for utf-8 encoded
# non-ascii string and ascii string
other.decode("utf-8")
ret = SafeStr(str.__add__(self, other))
except UnicodeDecodeError:
# binary data, get the representation used by Python for
# non-ascii bytes
# hex(255) returns "0xff", we want "\xff"
other_hexa = (hex(ord(char)) for char in other)
other_backslashed = (hex_num.replace("0x", "\\x")
for hex_num in other_hexa)
other_repr = "".join(other_backslashed)
ret = SafeStr(str.__add__(self, other_repr))
return ret
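# --- illustrative usage sketch (not part of the original module) ---
# The sample strings below are invented; they only show the intended behaviour:
# appending unicode text or raw binary data to a SafeStr never raises, and
# binary bytes end up as their backslashed hex representation.
#
#     s = SafeStr("traceback: ")
#     s = s + u"caf\xe9"      # unicode is utf-8 encoded and appended
#     s = s + "\xff\xfe"      # binary data is appended as "\xff\xfe"
#     print s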
| gpl-2.0 | 4,265,282,975,284,031,500 | 32.408451 | 79 | 0.602024 | false |
kmaiti/AWSAutoScalingWithF5andCloudFormation | aws-autoscale-ec2-instance-modify.py | 1 | 6954 | #!/usr/bin/env python
"""
Purpose : Extract next sequence number of auto-scaled instance and set new tag to self instance. Script will be running from new instance.
will take input from command line instead of from json file
Future Plan :
will associate instance to a role based IAM profile
Usage :
python ec2-autoscale-instance-modify.py -a <your aws access_key> -s <aws secret key> -g <auto scale group that used in cloudformation file> -r <region> -n <min_server_number> -c <customer> -t <uat/plab/prod> -p <appname> -d <domainname ie example.net>
"""
__author__ = "kama maiti"
__copyright__ = "Copyright 2016, AWS autoscaled instance tag modification project"
__credits__ = ["kamal maiti"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "kamal maiti"
__email__ = "[email protected]"
__status__ = "production/Non-production"
import re
import argparse
import boto.ec2.autoscale
from boto.ec2 import EC2Connection
import shlex, subprocess
akey = ""
skey = ""
ag = ""
rg = ""
min_num = ""
def find_server_number(name):
    # Return every run of three consecutive digits found in the name
    # (an empty list if there is none); the caller treats these as server numbers.
    # The parameter is named `name` to avoid shadowing the builtin `str`.
    return re.findall(r'\d\d\d', name)
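# Illustrative assumption about the naming convention (not part of the original
# script): "Name" tags are expected to look like
# "<customer>-<type>-<purpose>-101-<domain>", so the embedded three-digit group
# ("101") is what find_server_number() extracts for the sequencing logic below.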
def main():
arg_parser = argparse.ArgumentParser(description='Read autoscale instance')
arg_parser.add_argument('-a', dest='akey',help='Provide AWS_ACCESS_KEY')
arg_parser.add_argument('-s', dest='skey',help='Provide AWS_SECRET_ACCESS_KEY')
arg_parser.add_argument('-g', dest='ag',help='Provide User provided autoscale group name')
arg_parser.add_argument('-r', dest='rg',help='Provide region name')
arg_parser.add_argument('-n', dest='min_num',help='Minimum Server name')
arg_parser.add_argument('-c', dest='customer',help='Name of the customer in short')
arg_parser.add_argument('-t', dest='servertype',help='Type of the server ie prod or uat or plab')
arg_parser.add_argument('-p', dest='purpose',help='Purpose of the Server')
arg_parser.add_argument('-d', dest='domain',help='Domain name that will be appended to server name')
args = arg_parser.parse_args()
#print(args)
access_key = args.akey
secret_key = args.skey
region = args.rg
group_name = str(args.ag)
min_server_num = int(args.min_num)
customer = str(args.customer)
servertype = str(args.servertype)
purpose = str(args.purpose)
domain = str(args.domain)
#created two objects below. One for autocale connection and another for ec2 instance
as_conn = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
ec2_conn = boto.ec2.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
try:
groups = as_conn.get_all_groups()
all_groups = [group.name for group in groups]
for g in all_groups:
if group_name in g: #searching autocaling group that we are concerned with. Note all autoscalling group name should be unique
FOUND_GROUP = g #FOUND_GROUP will save exact AG name. Note that exact AG name is not same as user provided name. It'll check if group_name is subset of g
FOUND_GROUP_WITH_DES = as_conn.get_all_groups(names=[FOUND_GROUP])[0]
instance_ids = [i.instance_id for i in FOUND_GROUP_WITH_DES.instances]
#reservations = ec2_conn.get_all_instances(instance_ids)
instances = ec2_conn.get_only_instances(instance_ids)
#instances = [i for r in reservations for i in r.instances]
lNameTag = []
#collect all tags of all instances and sort Name tags and save them in list.
for i,j in enumerate(instances):
a = instances[i].tags
lNameTag.append(a['Name'])
#process each instances and take their server number in one list
lServerNum = []
if not lNameTag: #checking if list is empty or not. If empty then this is first instance whose server num will be min_server_num
next_number = min_server_num
else:
for server in lNameTag: #iterating each value of "Name" tag
if not find_server_number(server): #if method find_server_number returns null list
next_number = min_server_num
else:
                    val = find_server_number(server)   # e.g. [u'101']; use the first match
                    lServerNum.append(int(val[0]))     # the match is a string, convert to int
if not lServerNum: #check if list of server number is blank or not
next_number = min_server_num
else:
maximum_number = max(lServerNum) #used max function to find out maximum number in the list
next_number = maximum_number + 1
#Now we need to save this next_number in a file so that we can collect it and send to other commands.
with open('/tmp/serverno','w') as fd: #created a file and save the number as string. Then read it and used later
fd.write(str(next_number))
with open('/tmp/serverno','r') as fd:
num=fd.read()
#Will modify tag of current instance. Let's build a new tag.
delm = "-" #Delimeter that will be used to join multiple string
seq = ( customer, servertype, purpose, num, domain) #created a tuple
new_tag = delm.join(seq) #joined tuple strings
with open('/tmp/nodename','w') as fd:
fd.write(str(new_tag))
#will extract current instance ID using curl. ie curl http://169.254.169.254/latest/meta-data/instance-id
#
cmd = 'curl http://169.254.169.254/latest/meta-data/instance-id'
#shlex is simple lexical analyser for splitting a large string into tokens
args = shlex.split(cmd) #args will have value like : ['curl', 'http://169.254.169.254/latest/meta-data/instance-id']
output,error = subprocess.Popen(args,stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate() #out and error are saved in variable. communicate will execute comamnd
#o="i-fd96291f" #used for testing
cur_instance_reservation = ec2_conn.get_all_instances(instance_ids=output)
cur_instance = cur_instance_reservation[0].instances[0]
cur_instance.add_tag('Name', new_tag)
finally:
as_conn.close()
ec2_conn.close()
if __name__ == '__main__':
main()
| gpl-3.0 | -2,848,600,508,939,780,600 | 53.755906 | 252 | 0.623095 | false |
GeographicaGS/Elcano-iepg | www-srv/src/scripts/popgdp_newyear.py | 1 | 1294 | new_year = 2014
input_file = '../csv/popgdp2014.csv'
import sys
sys.path.append('../../www-srv/src')
from maplex.maplexmodel import MaplexModel
mm = MaplexModel()
import csv
with open(input_file, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
sql = ""
for row in spamreader:
countryname = row[0]
gdp = row[1].replace(",",".")
population = row[2].replace(",",".")
if countryname.strip() == "":
continue
geoentity = mm.getIdGeoentityByName(countryname,2)
if not geoentity:
        raise Exception('Geoentity not found for ' + countryname)
geoentity_id = geoentity[0]["id_geoentity"]
geoentity_names = mm.getGeoentityNames(geoentity_id,1)
if not geoentity_names:
        raise Exception('Geoentity code name not found for ' + countryname)
geoentity_code = geoentity_names[0]["names"][0]
sql += "INSERT INTO iepg_data_redux.pob_pib (code,date_in,date_out,pib,population) VALUES ('%s','%s','%s',%f,%f);\n" % \
(geoentity_code,str(new_year) + "-01-01",\
str(new_year) + "-12-31",float(gdp)* (10**9),\
float(population)* (10**6))
print sql | gpl-3.0 | -5,986,356,302,128,184,000 | 28.431818 | 128 | 0.566461 | false |
bmazin/ARCONS-pipeline | util/ObsFileSeqV2.py | 1 | 33310 | import os
import math
import time
import warnings
import numpy as np
from util import FileName
from util import ObsFile
from util import TCS
from interval import interval
import pyfits
import matplotlib.pyplot as plt
import pickle
from headers.DisplayStackHeaders import writeImageStack, readImageStack
class ObsFileSeq():
"""
Deal with a sequence of obsFiles, and present data as a set of
frames. Each frame has the telescope pointing at the same location,
as calculated by the TCS log file written by the data acquisition
system.
For a given run and date, consider the list of timeStamps as one
continuous observation, with the given name. Divide the observations
into a set of frames, with the maximum amount of seconds in one frame given
by dt
If the data acquisition system was doing a mosaic at the time, each
frame will contain data only for one telescope position. Two seconds after
each telescope move is masked, to give the telescope a chance to settle
in its new position.
One frame may contain data from more than one ObsFile.
getTargetList lists the timestamp (and target description) to show
the list of obsFiles included in this sequence.
getFrameList lists the information for each frame; its ra,dec offset,
and which obsFiles contribute to this frame.
plotLocations makes a simple png of the frame numbers plotted in
raOffset,decOffset from the initial position.
    Executing just this file demonstrates calls to getTargetList and
    plotLocations for a mosaic of the ring nebula.
"""
def __init__(self, name, run, date, timeStamps, dt):
"""
name -- a useful name for this set of objects
run -- the data campaign name (LICK2014)
date -- sundown date (20141020)
timeStamps -- the UTC date/time stamp
dt -- the maximum number of seconds for one frame
beamMapFile -- the beamMap file to use (None to use default)
"""
self.name = name
self.run = run
self.date = date
self.timeStamps = timeStamps
self.timeStamps.sort()
self.dt = dt
self.obsFiles = []
self.fileNames = []
self.obsFileUnixTimes = []
for timeStamp in self.timeStamps:
fn = FileName.FileName(run, date, timeStamp)
self.fileNames.append(fn)
of = ObsFile.ObsFile(fn.obs(), repeatable=True)
of.loadBeammapFile(fn.beammap())
of.loadBestWvlCalFile()
fn2 = FileName.FileName(run, date, "")
of.loadFlatCalFile(fn2.flatSoln())
try:
of.loadHotPixCalFile(fn.timeMask())
self.hotPixelsApplied = True
except:
self.hotPixelsApplied = False
self.obsFiles.append(of)
self.obsFileUnixTimes.append(of.getFromHeader('unixtime'))
self.tcs = TCS.TCS(run, date)
self.tcsDict = self.tcs.select(self.obsFiles[0], self.obsFiles[-1])
# each interval covers one obsFile
self.obsIntervals = []
for i in range(len(self.obsFiles)):
tStart = self.obsFiles[i].getFromHeader('unixtime')
tEndThis = tStart + self.obsFiles[i].getFromHeader('exptime')
if i < len(self.obsFiles) - 1:
tStartNext = self.obsFiles[i + 1].getFromHeader('unixtime')
tEnd = min(tEndThis, tStartNext)
else:
tEnd = tEndThis
self.obsIntervals.append(interval[tStart, tEnd])
self._defineFrames(dt)
# Default settings for astrometry
try:
self.setRm()
except:
# Fails if the telescope doesn't move every frame...
# Ignore this for now
pass
def setRaDrift(self, raDrift):
self.raArcsecPerSec = raDrift
def setScaleThetaDrift(self, moveList, driftList):
"""
Set scale and theta for this sequence.
input:
driftList is a list of dictionaries, containing iFrame, row, col
for the object found in two frames separated significantly in time
and with the same ra,dec offset
moveList is a list of dictionaries, containing iFrame, row, col
for the object found in two frames that are close in time
"""
# Calculate the scale and rotation with no drift
matchList = []
for frc in moveList:
iFrame = frc['iFrame']
matchList.append({'ra': self.tcsDict['raOffset'][iFrame] / 3600.0,
'dec': self.tcsDict['decOffset'][iFrame] / 3600.0,
'row': frc['row'],
'col': frc['col']})
scaleTheta = ObsFileSeq.getScaleTheta(matchList)
ct = math.cos(scaleTheta['theta'])
st = math.sin(scaleTheta['theta'])
# See how much row,col=0,0 moved between the two drift frames
ras = []
decs = []
times = []
for i in range(2):
row = driftList[i]['row']
col = driftList[i]['col']
print "i,row,col", i, row, col
ras.append((col * ct + row * st) / scaleTheta['scale'])
decs.append((-col * st + row * ct) / scaleTheta['scale'])
iFrame = driftList[i]['iFrame']
times.append(self.getTimeBySeq(iFrame))
self.raArcsecPerSec = 3600.0 * (ras[1] - ras[0]) / (times[1] - times[0])
print "ras=", ras
print "times=", times
print "raArcsecPerSec", self.raArcsecPerSec
self.rc0 = np.zeros((2, len(self.frameObsInfos)), dtype=np.float)
# Calculate the scale and rotation, including drift
matchList = []
t0 = self.getTimeBySeq(0)
for frc in moveList:
iFrame = frc['iFrame']
t = self.getTimeBySeq(iFrame)
raDrift = self.raArcsecPerSec * (t - t0)
ra = (self.tcsDict['raOffset'][iFrame] - raDrift) / 3600.0
matchList.append({'ra': ra,
'dec': self.tcsDict['decOffset'][iFrame] / 3600.0,
'row': frc['row'],
'col': frc['col']})
scaleTheta = ObsFileSeq.getScaleTheta(matchList)
# Find the row,col at the ra,dec of each frame
ct = math.cos(scaleTheta['theta'])
st = math.sin(scaleTheta['theta'])
for iFrame in range(len(self.frameObsInfos)):
t = self.getTimeBySeq(iFrame)
raDrift = self.raArcsecPerSec * (t - t0)
print "iFrame,raDrift", iFrame, raDrift
raOff = (self.tcsDict['raOffset'][iFrame] - raDrift) / 3600.0
deOff = (self.tcsDict['decOffset'][iFrame]) / 3600.0
# 0 for row; 1 for col
            self.rc0[0, iFrame] = (raOff * st + deOff * ct) / scaleTheta['scale']
            self.rc0[1, iFrame] = (raOff * ct - deOff * st) / scaleTheta['scale']
print "iFrame, raOffset, deOffset", iFrame, self.tcsDict['raOffset'][iFrame], self.tcsDict['decOffset'][
iFrame], raOff, deOff
# print "iFrame, raOff, deOff, row, col",iFrame,raOff,deOff, self.rc0[0,iFrame], self.rc0[1,iFrame]
# Numpy Kung-Fu here to subtract the minimum row,col
self.rc0 -= self.rc0.min(axis=1)[:, None]
# Calculate the size of the full image to include all pixels
self.nRowCol = self.rc0.max(axis=1)
self.nRowCol[0] += self.obsFiles[0].nRow
self.nRowCol[1] += self.obsFiles[0].nCol
self.nRowCol = np.ceil(self.nRowCol).astype(np.int)
@staticmethod
def getScaleTheta(matchList, flip=1):
"""
Calculate scale and theta for the measurement of an object in two
frames. The coordinate system is centered on the object.
col,row = position in pixel coordinates
ra,dec = position in sky coordinates
The transformation is specified with:
f = +1 or -1 to flip one axis:
col0,row0 is the location in pixel coordinates of the origin of
the sky coordinates, where ra,dec = 0,0
theta -- rotation angle
scale -- degrees/pixel
The transformation equations are:
col = col0 + (flip*ra*cos(theta) - dec*sin(theta)) / scale
        row = row0 + (flip*ra*sin(theta) + dec*cos(theta)) / scale
ra = ( col*cos(theta) + row*sin(theta)) * scale / flip
dec = (-col*sin(theta) + row*cos(theta)) * scale
input:
matchList is a list of dictionaries, containing ra, dec, row, col.
ra,dec is the location (in decimal degrees) of row,col=(0,0)
row,col is the location of the object in the frame
return:
a dictionary of scale (in degrees/pixel) and theta (radians)
"""
m0 = matchList[0]
m1 = matchList[1]
dra = m1['ra'] - m0['ra']
ddec = m1['dec'] - m0['dec']
dr = m1['row'] - m0['row']
dc = m1['col'] - m0['col']
theta = math.atan2((flip * dra * dr - ddec * dc), (ddec * dr + flip * dra * dc))
scale = math.sqrt((dra ** 2 + ddec ** 2) / (dc ** 2 + dr ** 2))
return {"scale": scale, "theta": theta}
def setTransform(self, scale, thetaDegrees, flip, rdot, cdot):
"""
"""
self.scale = scale # degrees/pixel
self.thetaDegrees = thetaDegrees
print "in setTransform: scale, thetaDegrees =",scale, thetaDegrees
self.ct = math.cos(math.radians(thetaDegrees))
self.st = math.sin(math.radians(thetaDegrees))
self.flip = flip
self.rdot = rdot
self.cdot = cdot
        # Keep track of the position of r,c=0,0 in each frame with the array
        # self.rc0, where
        # r0 = int(self.rc0[0, iFrame])
        # c0 = int(self.rc0[1, iFrame])
self.rc0 = np.zeros((2, self.nFrames), dtype=float)
for iFrame in range(self.nFrames):
r0c0 = self.getR0C0(iFrame)
self.rc0[0, iFrame] = r0c0['row0']
self.rc0[1, iFrame] = r0c0['col0']
        # Numpy yoga to subtract minimum row,col and set nRowCol
self.rc0 -= self.rc0.min(axis=1)[:, None]
self.nRowCol = self.rc0.max(axis=1)
self.nRowCol[0] += self.obsFiles[0].nRow
self.nRowCol[1] += self.obsFiles[0].nCol
self.nRowCol = np.ceil(self.nRowCol).astype(np.int)
print "end of setTransform: nRowCol=",self.nRowCol
def getR0C0(self, iFrame):
ra = self.tcsDict['raOffset'][iFrame]
dec = self.tcsDict['decOffset'][iFrame]
col = (self.flip * ra * self.ct - dec * self.st) / self.scale
row = (self.flip * ra * self.st + dec * self.ct) / self.scale
dt = self.tcsDict['timeOffset'][iFrame]
col0 = -col - dt*self.cdot
row0 = -row - dt*self.rdot
retval = dict(col0=col0, row0=row0)
print "iFrame=%3d ra(as)=%5.1f dec(as)=%5.1f row=%5.1f col=%5.1f dt=%9.3f col0=%5.1f row0=%5.1f"%(iFrame, ra, dec, row, col, dt, col0, row0)
return retval
def setRm(self,
degreesPerPixel=0.4 / 3600,
thetaDeg=0.0,
raArcsecPerSec=0.0,
verbose=False):
"""
Sets variables that will be used to offset frames
self.rdl is a list of raOffset, decOffset based on where the
telescope says it was pointing, adding in the drift in ra
self.rc0 is a list of the row,col locations
"""
if verbose:
print " arcsecPerPixel = ", degreesPerPixel * 3600
print "theta (degrees) = ", thetaDeg
print " raArcsecPerSec = ", raArcsecPerSec
self.degreesPerPixel = degreesPerPixel
self.thetaDeg = thetaDeg
self.raArcsecPerSec = raArcsecPerSec
theta = math.radians(thetaDeg)
sct = math.cos(theta) * degreesPerPixel
sst = math.sin(theta) * degreesPerPixel
self.rmPixToEq = np.array([[sct, -sst], [sst, sct]])
self.rmEqToPix = np.linalg.inv(self.rmPixToEq)
t0 = self.getTimeBySeq(0)
self.rdl = []
for iFrame in range(len(self.frameObsInfos)):
t = self.getTimeBySeq(iFrame)
raDrift = raArcsecPerSec * (t - t0)
raOff = (self.tcsDict['raOffset'][iFrame] - raDrift) / 3600.0
deOff = (self.tcsDict['decOffset'][iFrame]) / 3600.0
self.rdl.append([raOff, deOff])
self.rc0 = np.dot(self.rmEqToPix, np.array(self.rdl).transpose())
# Numpy Kung-Fu here to subtract the minimum row,col
self.rc0 -= self.rc0.min(axis=1)[:, None]
self.nRowCol = self.rc0.max(axis=1)
self.nRowCol[0] += self.obsFiles[0].nRow
self.nRowCol[1] += self.obsFiles[0].nCol
self.nRowCol = np.ceil(self.nRowCol).astype(np.int)
def makeMosaicImage(self, iFrameList=None, wvlBinRange=None,
verbose=False):
"""
create a mosaic image of the frames listed, in the wavelength bin range
input: iFrameList, default None uses all frames
wvlBinRange, default None uses all wavelength bins,
otherwise (wBinMin,wBinMax)
output: a numpy 2d of the counts/second image
"""
try:
self.cubes
except AttributeError:
if verbose:
print "ObsFileSeq.makeMosaicImage: loadSpectralCubes()"
self.loadSpectralCubes()
cubeSum = np.zeros((self.nRowCol[0], self.nRowCol[1]))
effIntTimeSum = np.zeros((self.nRowCol[0], self.nRowCol[1]))
nRowCube = self.obsFiles[0].nRow
nColCube = self.obsFiles[0].nCol
if iFrameList is None:
iFrameList = range(self.nFrames)
if wvlBinRange is None:
wBinMin = 0
wBinMax = self.cubes[0]['cube'].shape[2]
else:
wBinMin = wvlBinRange[0]
wBinMax = wvlBinRange[1]
for iFrame in iFrameList:
r0 = int(self.rc0[0, iFrame])
c0 = int(self.rc0[1, iFrame])
if verbose:
print "ObsFileSeq:makeMosaicImage: r0,c0=", r0, c0
# The third index here is where you select which wavelength bins
# to include
cubeSum[r0:r0 + nRowCube, c0:c0 + nColCube] += \
self.cubes[iFrame]['cube'][:, :, wBinMin:wBinMax].sum(axis=2)
effIntTimeSum[r0:r0 + nRowCube, c0:c0 + nColCube] += \
self.cubes[iFrame]['effIntTime'][:, :]
with np.errstate(divide='ignore'):
cps = cubeSum / effIntTimeSum
cps = np.nan_to_num(cps)
return cps
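    # --- illustrative usage sketch (not part of the original file) ---
    # Assuming a sequence built as in the __main__ block at the bottom of this
    # file and a transform set with setRm(), a counts/second mosaic restricted
    # to the first ten wavelength bins could be produced like this:
    #
    #     img = ofs.makeMosaicImage(wvlBinRange=(0, 10))
    #     plt.imshow(img, origin='lower')
    #     plt.savefig(ofs.name + '-mosaic.png')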
def __del__(self):
for of in self.obsFiles:
del of
def _defineFrames(self, dt):
self.dt = dt
mts = self.tcsDict['time']
# make a list of times:
# start of first observation, each move, end of last observation
times = np.zeros(len(mts) + 2)
times[1:-1] = mts
times[0] = self.obsFiles[0].getFromHeader('unixtime')
self.beginTime = times[0]
times[-1] = self.obsFiles[-1].getFromHeader('unixtime') + \
self.obsFiles[-1].getFromHeader('exptime')
# Divide these segments into lengths of dt. Exclude two seconds after
# each boundary, to let the telescope settle down after a move
self.frameIntervals = []
self.locationIdx = []
for i in range(len(times) - 1):
t0 = times[i] + 2
t1 = times[i + 1]
nSeg = int((t1 - t0) / dt) + 1
delta = (t1 - t0) / nSeg
for j in range(nSeg):
self.frameIntervals.append(interval[t0 + j * delta,
t0 + (j + 1) * delta])
self.locationIdx.append(i)
# For each frame, determine a list of:
# obsFile, firstSec, integrationTime
# In general, one frame will need more than one of these if it
# spans the time boundary between obsFiles
self.frameObsInfos = []
for iFrame in range(len(self.frameIntervals)):
frameObsInfo = []
thisInterval = self.frameIntervals[iFrame]
# see if this interval overlaps with an obsFile
for i, obsInterval in enumerate(self.obsIntervals):
overlap = thisInterval & obsInterval
if len(overlap) > 0:
tBeg = overlap[0][0]
tEnd = overlap[0][1]
integrationTime = tEnd - tBeg
firstSec = tBeg - \
self.obsFiles[i].getFromHeader('unixtime')
obs = self.obsFiles[i]
obsInfo = {"obs": obs,
"iObs": i,
"firstSec": firstSec,
"integrationTime": integrationTime}
frameObsInfo.append(obsInfo)
self.frameObsInfos.append(frameObsInfo)
self.nFrames = len(self.frameObsInfos)
def getTimeBySeq(self, iSeq):
"""
get the mean time of the frame
"""
foi = self.frameObsInfos[iSeq]
wtSum = 0
wSum = 0
for oi in foi:
w = oi['integrationTime']
t = self.obsFileUnixTimes[oi['iObs']] + oi['firstSec'] + \
0.5 * oi['integrationTime']
wtSum += w * t
wSum += w
meanTime = wtSum / float(wSum)
return meanTime
def getTargetList(self, printLines=True):
"""
        get list of information: the timestamp + target description in header
        default printLines=True to also print each line to stdout
return: a list of the lines
"""
retval = []
for i, timeStamp in enumerate(self.timeStamps):
target = self.obsFiles[i].getFromHeader('target')
line = "%2d %s %s" % (i, timeStamp, target)
if printLines:
print line
retval.append(line)
return retval
def getFrameList(self, printLines=True):
"""
returns a list of lines which describes the frames.
Each line has an index, the time (relative to the time of the first
frame), effective integration time, location number,
the (raOffset,decOffset), and a list of obs files used
in the frame.
"""
retval = []
for i, frameInterval in enumerate(self.frameIntervals):
t0 = frameInterval[0][0] - self.beginTime
t1 = frameInterval[0][1] - self.beginTime
dt = t1 - t0
locIdx = self.locationIdx[i]
xOff = self.tcsDict['raOffset'][locIdx]
yOff = self.tcsDict['decOffset'][locIdx]
obsFiles = ""
for frameObsInfo in self.frameObsInfos[i]:
obsFiles += " %d" % frameObsInfo['iObs']
fmt = "i=%3d begin=%8.2f expTime=%6.2f loc=%2d (%5.1f,%5.1f) %s"
line = fmt % (i, t0, dt, locIdx, xOff, yOff, obsFiles)
if printLines:
print line
retval.append(line)
return retval
def getFrameDict(self, printLines=True):
"""
        Pretty much the same as getFrameList except that it returns
a dictionary for each frame
"""
frameInfo = []
for i, frameInterval in enumerate(self.frameIntervals):
t0 = frameInterval[0][0] - self.beginTime
t1 = frameInterval[0][1] - self.beginTime
dt = t1 - t0
locIdx = self.locationIdx[i]
xOff = self.tcsDict['raOffset'][locIdx]
yOff = self.tcsDict['decOffset'][locIdx]
meanTF = self.getTimeBySeq(i)
fI = {"iframe": i,
"begin": t0,
"expTime": dt,
"loc": locIdx,
"offsRA": xOff,
"offsDec": yOff,
"meanTime": meanTF}
# "obsFile":ofs.fileNames[i].obs(),
# "ob":ofs.obsFiles[i]}
if printLines:
print fI
frameInfo.append(fI)
self.frameDict = frameInfo
return frameInfo
def getSpectralCubeByFrame(self, iFrame, weighted=False,
fluxWeighted=False,
wvlStart=None, wvlStop=None,
wvlBinWidth=None, energyBinWidth=None,
wvlBinEdges=None, timeSpacingCut=None):
"""
return the spectral cube for this frame
call ObsFile.getSpectralCube for each ObsFile in this frame.
The dictionary returned copies 'wvlBinEdges' from the first ObsFile,
and sums the 'cube' and 'effIntTime' from all ObsFiles.
I left the print statements in to report progress, because this is
very slow.
"""
retval = None
thisInterval = self.frameIntervals[iFrame]
for i, ofInterval in enumerate(self.obsIntervals):
overlap = thisInterval & ofInterval
if len(overlap) > 0:
tBeg = overlap[0][0]
tEnd = overlap[0][1]
integrationTime = tEnd - tBeg
firstSec = tBeg - self.obsFiles[i].getFromHeader('unixtime')
obs = self.obsFiles[i]
obs.setWvlCutoffs(wvlLowerLimit=wvlStart,
wvlUpperLimit=wvlStop)
spectralCube = \
obs.getSpectralCube(firstSec=firstSec,
integrationTime=integrationTime,
weighted=weighted,
fluxWeighted=fluxWeighted,
wvlStart=wvlStart,
wvlStop=wvlStop,
wvlBinWidth=wvlBinWidth,
energyBinWidth=energyBinWidth,
wvlBinEdges=wvlBinEdges,
timeSpacingCut=timeSpacingCut
)
cube = spectralCube['cube']
wbe = spectralCube['wvlBinEdges']
eit = spectralCube['effIntTime']
if retval is None:
retval = {'cube': cube,
'wvlBinEdges': wbe,
'effIntTime': eit}
else:
retval['cube'] += cube
retval['effIntTime'] += eit
return retval
def loadSpectralCubes(self, weighted=False, fluxWeighted=False,
wvlStart=None, wvlStop=None,
wvlBinWidth=None, energyBinWidth=None,
wvlBinEdges=None, timeSpacingCut=None):
"""
calls getSpectralCubeByFrame on each iFrame, storing the
results in the list self.cubes
        use a pickle file named <name>-cubes.pkl as a buffer. If that file
        exists, load the cubes from there; otherwise compute the cubes and
        save them there. WARNING -- the settings are not stored, so
they are ignored when loading from the pickle file.
"""
cpfn = self.name + "-cubes.pkl"
if os.path.isfile(cpfn):
print "loadSpectralCubes: load from ", cpfn
self.cubes = pickle.load(open(cpfn, 'rb'))
else:
self.cubes = []
for iFrame in range(len(self.frameIntervals)):
print "now load spectral cube for iFrame=", iFrame
cube = self.getSpectralCubeByFrame(iFrame)
print "counts are ", cube['cube'].sum()
self.cubes.append(cube)
# self.cubes.append(self.getSpectralCubeByFrame(iFrame,
# weighted,
# fluxWeighted,
# wvlStart,
# wvlStop,
# wvlBinWidth,
# energyBinWidth,
# wvlBinEdges,
# timeSpacingCut))
print "counts read: ", self.cubes[-1]['cube'].sum()
pickle.dump(self.cubes, open(cpfn, 'wb'))
def makePngFileByInterval(self, thisInterval, wvMin=3000, wvMax=12000,
rateMax=None):
fn = "%s-%03d-%05d-%05d.png" % \
(self.name, thisInterval, int(wvMin), int(wvMax))
print "now make fn=", fn
cubeSum = self.cubes[thisInterval]['cube'].sum(axis=2)
effIntTimeSum = self.cubes[thisInterval]['effIntTime']
old_settings = np.seterr(all='ignore')
np.seterr(divide='ignore')
rate = np.nan_to_num(cubeSum / effIntTimeSum)
np.seterr(**old_settings)
print fn, rate.min(), rate.max()
plt.clf()
if rateMax is None:
rateMax = rate.max()
plt.pcolor(rate, cmap='hot', vmin=0, vmax=rateMax)
plt.colorbar()
try:
os.remove(fn)
except OSError:
pass
plt.title(fn)
plt.savefig(fn)
def makeAllFitsFiles(self, wvMin, wvMax):
self.loadSpectralCubes(wvlStart=wvMin, wvlStop=wvMax)
for interval in range(len(self.frameIntervals)):
self.makeFitsFileByInterval(interval, wvMin, wvMax)
return
def makeFitsFileByInterval(self, thisInterval, wvMin, wvMax):
fn = "%s-%03d-%05d-%05d.fit" % (self.name, thisInterval,
int(wvMin), int(wvMax))
print "now make fn=", fn
pixels = self.cubes[thisInterval]['cube'].sum(axis=2)
print "number of counts=", pixels.sum()
hdu = pyfits.PrimaryHDU(pixels)
try:
os.remove(fn)
except OSError:
pass
hdu.writeto(fn)
def loadImageStack(self, fileName, wvlStart=None, wvlStop=None,
weighted=True, fluxWeighted=False,
getRawCount=False, scaleByEffInt=True,
deadTime=100.e-6):
# If the file exists, read it out
if os.path.isfile(fileName):
return readImageStack(fileName)
# if the file doesn't exists, make it
else:
images = []
pixIntTimes = []
startTimes = []
endTimes = []
intTimes = []
for iFrame in range(len(self.frameIntervals)):
im_dict = self.getPixelCountImageByFrame(iFrame,
wvlStart, wvlStop,
weighted,
fluxWeighted,
getRawCount,
scaleByEffInt,
deadTime)
images.append(im_dict['image'])
pixIntTimes.append(im_dict['pixIntTime'])
startTimes.append(im_dict['startTime'])
endTimes.append(im_dict['endTime'])
intTimes.append(im_dict['intTime'])
writeImageStack(fileName, images, startTimes=startTimes,
endTimes=endTimes, intTimes=intTimes,
pixIntTimes=pixIntTimes, targetName=self.name,
run=self.run,
nFrames=len(self.frameIntervals),
wvlLowerLimit=wvlStart,
wvlUpperLimit=wvlStop, weighted=weighted,
fluxWeighted=fluxWeighted,
hotPixelsApplied=self.hotPixelsApplied,
maxExposureTime=self.dt,
tStamps=self.timeStamps)
# return {'images':images,'pixIntTimes':pixIntTimes,
# 'startTimes':startTimes,'endTimes':endTimes,'intTimes':intTimes}
return readImageStack(fileName)
def getPixelCountImageByFrame(self, iFrame, wvlStart=None, wvlStop=None,
weighted=True, fluxWeighted=True,
getRawCount=False, scaleByEffInt=True,
deadTime=100.e-6):
'''
This gets the i'th image
Inputs:
iFrame - which frame you want
wvlStart - starting wavelength range
wvlStop - ending wavelength range
weighted, fluxWeighted, getRawCount, scaleByEffInt -
options for obsFile.getPixelCountImage()
deadTime - for deadtime correcting image
Returns:
Dictionary with the following keys:
'image' - fully calibrated, corrected image.
scaled to the total integration time
deadTime corrected
'pixIntTime' - actual integration time for each pixel in image
'intTime' - length of exposure
'startTime' - beginning of image (unix time)
'endTime' - end of image. Might be different from
startTime+intTime if there's a break in the middle while
switching to a new obsFile
'''
retval = None
for obsInfo in self.frameObsInfos[iFrame]:
print obsInfo
obsInfo['obs'].setWvlCutoffs(wvlLowerLimit=wvlStart,
wvlUpperLimit=wvlStop)
im_dict = obsInfo['obs']. \
getPixelCountImage(obsInfo["firstSec"],
obsInfo["integrationTime"],
weighted,
fluxWeighted,
getRawCount,
scaleByEffInt=False)
# Do this manually so
# we can correct deadTime first
im = im_dict['image']
# print 'im: ', np.sum(im)
# Correct for dead time
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
'invalid value encountered in divide',
RuntimeWarning)
w_deadTime = \
1.0 - im_dict['rawCounts'] * deadTime / im_dict['effIntTimes']
im = im / w_deadTime
if scaleByEffInt:
# Correct for exposure time
im = im * obsInfo["integrationTime"] / im_dict['effIntTimes']
# Remove any funny values
im[np.invert(np.isfinite(im))] = 0.
# print '--> ', np.sum(im)
if retval is None:
retval = {'image': im, 'pixIntTime': im_dict['effIntTimes'],
'intTime': obsInfo["integrationTime"],
'startTime': self.frameIntervals[iFrame][0][0],
'endTime': self.frameIntervals[iFrame][0][1]}
else:
retval['image'] += im
retval['pixIntTime'] += im_dict['effIntTimes']
retval['intTime'] += obsInfo["integrationTime"]
return retval
def plotLocations(self, fileName=None):
plt.clf()
x = self.tcsDict['raOffset']
y = self.tcsDict['decOffset']
plt.plot(x, y)
for i in range(len(x)):
plt.text(x[i], y[i], str(i),
horizontalalignment="center",
verticalalignment="center")
plt.axes().set_aspect('equal', 'datalim')
plt.title(self.name)
plt.xlabel("raOffset (arcsec)")
plt.ylabel("decOffset (arcsec)")
print "in ObsFileSeq.plotLocations: fileName=", fileName
if not fileName:
plt.show()
else:
plt.savefig(fileName)
if __name__ == "__main__":
if 0:
name = 'ring-20141020'
run = "PAL2014"
date = "20141020"
tsl = [
'20141021-033954',
'20141021-034532',
'20141021-035035',
'20141021-035538',
'20141021-040041',
'20141021-040544',
'20141021-041047',
]
dt = 200
ofs = ObsFileSeq(name, run, date, tsl, dt)
print "Now call getTargetList"
ofs.getTargetList()
print "Now call getFrameList"
ofs.getFrameList()
ofs.plotLocations(name + ".png")
print "now get time of first frame"
for i in range(66):
print "i=", i, " time=", ofs.getTimeBySeq(i)
# apci = ofs.getAllPixelCountImages(getRawCount=True)
del ofs
| gpl-2.0 | -2,156,267,733,733,224,700 | 41.379135 | 148 | 0.52918 | false |
developerlbas/relabs | relabs/helper/migrations/0001_initial.py | 1 | 1396 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Paso'
db.create_table('paso', (
('clave_trabajador', self.gf('django.db.models.fields.BigIntegerField')()),
('fecha_control', self.gf('django.db.models.fields.DateTimeField')()),
('hora_control', self.gf('django.db.models.fields.TimeField')()),
('actualizacion', self.gf('django.db.models.fields.DateTimeField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'helper', ['Paso'])
def backwards(self, orm):
# Deleting model 'Paso'
db.delete_table('paso')
models = {
u'helper.paso': {
'Meta': {'object_name': 'Paso', 'db_table': "'paso'"},
'actualizacion': ('django.db.models.fields.DateTimeField', [], {}),
'clave_trabajador': ('django.db.models.fields.BigIntegerField', [], {}),
'fecha_control': ('django.db.models.fields.DateTimeField', [], {}),
'hora_control': ('django.db.models.fields.TimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['helper'] | mit | 7,534,975,199,779,218,000 | 35.763158 | 87 | 0.577364 | false |
pfjel7/housing-insights | python/housinginsights/ingestion/CSVWriter.py | 1 | 3899 | """
CSVWriter.py contains the CSVWriter class that is used to create a clean.psv
file that can later be bulk-loaded into the database.
"""
from csv import DictWriter
import os
import copy
import uuid
logging_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, "logs"))
class CSVWriter(object):
"""
Takes a row of data, plus the meta data about it, and creates a clean.csv
file locally that can be later be bulk-uploaded to the database.
"""
def __init__(self, meta, manifest_row, filename=None):
"""""
:param meta: the parsed json from the meta data containing the format
expected of each SQL table.
:param manifest_row: a dictionary from manifest.csv for the source file
currently being acted on.
:param filename: optional, filename of where to write the data.
The default is current directory temp_{tablename}.csv
"""
self.manifest_row = manifest_row
self.tablename = manifest_row['destination_table']
self.unique_data_id = manifest_row['unique_data_id']
self.meta = meta
self.fields = meta[self.tablename]['fields']
        # DictWriter needs a list of fields, in order, with the same keys as
        # the row dict. sql_fields could be used in the header row; currently
        # not used because psycopg2 doesn't like it.
self.csv_fields = []
self.sql_fields = []
for field in self.fields:
self.csv_fields.append(field['source_name'])
self.sql_fields.append(field['sql_name'])
# We always want to append this to every table. write() should also
# append this to provided data
self.dictwriter_fields = copy.copy(self.csv_fields)
# Add id column to every table
self.dictwriter_fields.append('id')
        # By default, creates a temp psv file wherever the calling module was
        # located
        self.filename = 'temp_{}.psv'.format(self.unique_data_id) if filename is None else filename
# remove any existing copy of the file so we are starting clean
self.remove_file()
        # psycopg2's copy_from does not like having headers in the file, so the
        # header-writing code below is left commented out.
#self.file = open(self.filename, 'w', newline='')
#headerwriter = DictWriter(self.file, fieldnames = self.sql_fields, delimiter="|")
#headerwriter.writeheader()
#self.file.close()
#print("header written")
self.file = open(self.filename, 'a', newline='', encoding='utf-8')
self.writer = DictWriter(self.file, fieldnames=self.dictwriter_fields, delimiter="|")
def write(self, row):
"""
Writes the given row into the clean pipe-delimited file that will be
loaded into the database.
:param row: the given data row
:return: None
"""
row['unique_data_id'] = self.manifest_row['unique_data_id']
# Note to developers - if this row returns a key error due to an
# optional column, it means you need to have your cleaner add a 'null'
# value for that optional column.
# Generate a random uuid
row['id'] = str(uuid.uuid4())
self.writer.writerow(row)
    def open(self):
        """
        Opens the file for writing. Normally called by init, but can be called
        again by the user if they want to re-open the file for writing
        """
        # Re-open in append mode (mirroring __init__) so write() can be used
        # again after a close().
        self.file = open(self.filename, 'a', newline='', encoding='utf-8')
        self.writer = DictWriter(self.file, fieldnames=self.dictwriter_fields,
                                 delimiter="|")
def close(self):
"""
Since we can't use a with statement in the object, it's the caller's
responsibility to manually close the file when they are done writing
"""
self.file.close()
# TODO should this be part of the __del__
def remove_file(self):
try:
os.remove(self.filename)
except OSError:
pass
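# --- illustrative usage sketch (not part of the original module) ---
# The meta/manifest fragments below are invented for the example; real values
# come from meta.json and manifest.csv in the ingestion pipeline.
#
#     meta = {'mytable': {'fields': [
#         {'source_name': 'unique_data_id', 'sql_name': 'unique_data_id'},
#         {'source_name': 'zip',            'sql_name': 'zip_code'}]}}
#     manifest_row = {'destination_table': 'mytable', 'unique_data_id': 'my_data'}
#     w = CSVWriter(meta, manifest_row)
#     w.write({'zip': '20002'})   # unique_data_id and id are filled in by write()
#     w.close()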
| mit | 3,770,622,406,761,371,600 | 36.133333 | 99 | 0.619646 | false |
dani-i/bachelor-project | utils/charts/chart_entry.py | 1 | 2892 |
class ChartEntry:
def __init__(self):
self._identifier = ''
self._x = -1
self._y = -1
self._confidence_interval_95 = -1
def __str__(self):
rez = '\n## Is valid : ' + str(self.is_valid()) + ' ##'
rez += '\nIdentifier : ' + self.identifier
rez += '\nX : ' + str(self.x)
rez += '\nY : ' + str(self.y)
rez += '\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
return rez
##########################################################################
# identifier
@property
def identifier(self):
return self._identifier
    @identifier.setter
def identifier(self,
value: str):
if not isinstance(value, str) \
or value == '':
raise ValueError('Identifier must be string and not empty.')
self._identifier = value
##########################################################################
# x
@property
def x(self):
return self._x
@x.setter
def x(self,
value):
if isinstance(value, float):
value = int(round(value))
if not isinstance(value, int) or value < 0:
raise ValueError('X must be integer and >= 0.')
self._x = value
##########################################################################
# y
@property
def y(self):
return self._y
@y.setter
def y(self,
value):
if isinstance(value, float):
value = int(round(value))
if not isinstance(value, int) or value < 0:
raise ValueError('Y must be integer and >= 0.')
self._y = value
##########################################################################
# confidence_interval_95
@property
def confidence_interval_95(self):
return self._confidence_interval_95
@confidence_interval_95.setter
def confidence_interval_95(
self,
value):
if not isinstance(value, float) or value < 0:
raise ValueError('CI must be float and >= 0.')
self._confidence_interval_95 = value
##########################################################################
def is_valid(self):
"""
- Checks if valid
:return: - True if valid
- False otherwise
"""
if not isinstance(self.identifier, str) or self.identifier == '' \
or not isinstance(self.confidence_interval_95, float) \
or self.confidence_interval_95 < 0 \
or not isinstance(self.x, int) or self.x < 0 \
or not isinstance(self.y, int) or self.y < 0:
return False
return True
##########################################################################
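# --- illustrative usage sketch (not part of the original module) ---
#     ce = ChartEntry()
#     ce.identifier = 'accuracy'
#     ce.x = 10
#     ce.y = 87
#     ce.confidence_interval_95 = 1.5
#     assert ce.is_valid()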
| apache-2.0 | 1,384,881,015,369,096,700 | 25.290909 | 78 | 0.412517 | false |
ENCODE-DCC/encoded | src/encoded/tests/fixtures/schemas/target.py | 1 | 11043 | import pytest
@pytest.fixture
def target(testapp, organism):
item = {
'label': 'ATF4',
'target_organism': organism['@id'],
'investigated_as': ['transcription factor'],
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def base_target(testapp, organism):
item = {
'target_organism': organism['uuid'],
'label': 'TAF1',
'investigated_as': ['transcription factor']
}
return testapp.post_json('/target', item, status=201).json['@graph'][0]
@pytest.fixture
def tag_target(testapp, organism):
item = {
'target_organism': organism['uuid'],
'label': 'eGFP',
'investigated_as': ['tag']
}
return testapp.post_json('/target', item, status=201).json['@graph'][0]
@pytest.fixture
def recombinant_target(testapp, gene):
item = {
'label': 'HA-ABCD',
'investigated_as': ['transcription factor'],
'genes': [gene['uuid']],
'modifications': [{'modification': 'HA'}]
}
return testapp.post_json('/target', item, status=201).json['@graph'][0]
@pytest.fixture
def base_target1(testapp, gene):
item = {
'genes': [gene['uuid']],
'label': 'ABCD',
'investigated_as': ['transcription factor']
}
return testapp.post_json('/target', item, status=201).json['@graph'][0]
@pytest.fixture
def base_target2(testapp, gene):
item = {
'genes': [gene['uuid']],
'label': 'EFGH',
'investigated_as': ['transcription factor']
}
return testapp.post_json('/target', item, status=201).json['@graph'][0]
@pytest.fixture
def gfp_target(testapp, organism):
item = {
'label': 'gfp',
'target_organism': organism['@id'],
'investigated_as': ['tag'],
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_H3K4me3(testapp, organism):
item = {
'label': 'H3K4me3',
'target_organism': organism['@id'],
'investigated_as': ['histone']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_CTCF(testapp, organism):
item = {
'label': 'CTCF',
'target_organism': organism['@id'],
'investigated_as': ['transcription factor']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def mouse_H3K9me3(testapp, mouse):
item = {
'target_organism': mouse['@id'],
'label': 'H3K9me3',
'investigated_as': ['histone', 'broad histone mark']
}
return testapp.post_json('/target', item, status=201).json['@graph'][0]
@pytest.fixture
def tagged_target(testapp, gene):
item = {
'genes': [gene['uuid']],
'modifications': [{'modification': 'eGFP'}],
'label': 'eGFP-CTCF',
'investigated_as': ['transcription factor']
}
return testapp.post_json('/target', item, status=201).json['@graph'][0]
@pytest.fixture
def target_H3K27me3(testapp, organism):
item = {
'label': 'H3K27me3',
'target_organism': organism['@id'],
'investigated_as': ['histone', 'broad histone mark']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_H3K36me3(testapp, organism):
item = {
'label': 'H3K36me3',
'target_organism': organism['@id'],
'investigated_as': ['histone']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_H3K4me1(testapp, organism):
item = {
'label': 'H3K4me1',
'target_organism': organism['@id'],
'investigated_as': ['histone']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_H3K27ac(testapp, organism):
item = {
'label': 'H3K27ac',
'target_organism': organism['@id'],
'investigated_as': ['histone']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_H3K27ac_2(testapp, organism):
item = {
'label': 'H3K27ac',
'target_organism': organism['@id'],
'investigated_as': ['histone',
'narrow histone mark']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_H3K9me3_url(testapp, organism):
item = {
'label': 'H3K9me3',
'target_organism': organism['@id'],
'investigated_as': ['histone',
'broad histone mark']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_promoter(testapp, fly):
item = {
'label': 'daf-2',
'target_organism': fly['@id'],
'investigated_as': ['other context']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_H3K9me3(testapp, organism):
item = {
'label': 'H3K9me3',
'target_organism': organism['@id'],
'investigated_as': ['histone']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_nongene(mouse):
return {
'label': 'nongene',
'target_organism': mouse['uuid'],
'investigated_as': ['other context'],
}
@pytest.fixture
def target_one_gene(ctcf):
return {
'label': 'one-gene',
'genes': [ctcf['uuid']],
'investigated_as': ['other context'],
}
@pytest.fixture
def target_two_same_org(ctcf, myc):
return {
'label': 'two-same-org',
'genes': [ctcf['uuid'], myc['uuid']],
'investigated_as': ['other context'],
}
@pytest.fixture
def target_two_diff_orgs(ctcf, tbp):
return {
'label': 'two-diff-org',
'genes': [ctcf['uuid'], tbp['uuid']],
'investigated_as': ['other context'],
}
@pytest.fixture
def target_genes_org(human, ctcf, myc):
return {
'label': 'genes-org',
'target_organism': human['uuid'],
'genes': [ctcf['uuid'], myc['uuid']],
'investigated_as': ['other context'],
}
@pytest.fixture
def target_synthetic_tag():
return {
'label': 'FLAG',
'investigated_as': ['synthetic tag'],
}
@pytest.fixture
def mouse_target(testapp, mouse):
item = {
'label': 'ATF4',
'target_organism': mouse['@id'],
'investigated_as': ['transcription factor'],
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def mouse_target_H3K9me3(testapp, mouse):
item = {
'label': 'H3K9me3',
'target_organism': mouse['@id'],
'investigated_as': ['histone',
'broad histone mark']
}
return testapp.post_json('/target', item).json['@graph'][0]
@pytest.fixture
def target_0_0(organism):
return{
'organism': organism['uuid'],
'label': 'TEST'
}
@pytest.fixture
def target_1_0(target_0_0):
item = target_0_0.copy()
item.update({
'schema_version': '1',
'status': 'CURRENT',
})
return item
@pytest.fixture
def target_2_0(target_0_0):
item = target_0_0.copy()
item.update({
'schema_version': '2',
})
return item
@pytest.fixture
def target_5_0(target_0_0):
item = target_0_0.copy()
item.update({
'schema_version': '5',
'status': 'proposed'
})
return item
@pytest.fixture
def target_6_0(target_0_0):
item = target_0_0.copy()
item.update({
'schema_version': '6',
'status': 'current',
'investigated_as': ['histone modification', 'histone']
})
return item
@pytest.fixture
def target_8_no_genes(target_0_0):
item = target_0_0.copy()
item.update({
'schema_version': '8',
'dbxref': [
'UniProtKB:P04908'
]
})
return item
@pytest.fixture
def target_8_one_gene(target_8_no_genes):
item = target_8_no_genes.copy()
item.update({
'gene_name': 'HIST1H2AE',
'dbxref': [
'GeneID:3012',
'UniProtKB:P04908'
]
})
return item
@pytest.fixture
def target_8_two_genes(target_8_one_gene):
item = target_8_one_gene.copy()
item.update({
'gene_name': 'Histone H2A',
'dbxref': [
'GeneID:8335',
'GeneID:3012',
'UniProtKB:P04908'
]
})
return item
@pytest.fixture
def target_9_empty_modifications(target_8_one_gene):
item = {
'investigated_as': ['other context'],
'modifications': [],
'label': 'empty-modifications'
}
return item
@pytest.fixture
def target_9_real_modifications(target_8_one_gene):
item = {
'investigated_as': ['other context'],
'modifications': [{'modification': '3xFLAG'}],
'label': 'empty-modifications'
}
return item
@pytest.fixture
def gene3012(testapp, organism):
item = {
'dbxrefs': ['HGNC:4724'],
'organism': organism['uuid'],
'symbol': 'HIST1H2AE',
'ncbi_entrez_status': 'live',
'geneid': '3012',
}
return testapp.post_json('/gene', item).json['@graph'][0]
@pytest.fixture
def gene8335(testapp, organism):
item = {
'dbxrefs': ['HGNC:4734'],
'organism': organism['uuid'],
'symbol': 'HIST1H2AB',
'ncbi_entrez_status': 'live',
'geneid': '8335',
}
return testapp.post_json('/gene', item).json['@graph'][0]
@pytest.fixture
def target_10_nt_mod(organism):
item = {
'investigated_as': ['nucleotide modification'],
'target_organism': organism['uuid'],
'label': 'nucleotide-modification-target'
}
return item
@pytest.fixture
def target_10_other_ptm(gene8335):
item = {
'investigated_as': [
'other post-translational modification',
'chromatin remodeller',
'RNA binding protein'
],
'genes': [gene8335['uuid']],
'modifications': [{'modification': 'Phosphorylation'}],
'label': 'nucleotide-modification-target'
}
return item
@pytest.fixture
def target_11_control(human):
item = {
'investigated_as': ['control'],
'target_organism': human['uuid'],
'label': 'No protein target control'
}
return item
@pytest.fixture
def target_12_recombinant(ctcf):
item = {
'investigated_as': [
'recombinant protein',
'chromatin remodeller',
'RNA binding protein'
],
'genes': [ctcf['uuid']],
'modifications': [{'modification': 'eGFP'}],
'label': 'eGFP-CTCF'
}
return item
@pytest.fixture
def target_13_one_gene(target_8_one_gene, gene8335):
item = target_8_one_gene.copy()
item.update({
'schema_version': '13',
'genes': [gene8335['uuid']]
})
return item
@pytest.fixture
def target_13_no_genes(target_8_no_genes):
item = target_8_no_genes.copy()
item.update({
'schema_version': '13'
})
return item
| mit | -3,268,610,229,264,504,300 | 22.646681 | 75 | 0.563615 | false |
Oblivion1221/Pycodes | learn_py/pig_latin.py | 1 | 1048 | from __future__ import print_function
def consonant(c):
    vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
    return c not in vowels
def pig_latin_1(word):
if consonant(word[0]):
word1 = word[1:] + word[0] + 'a' + 'y'
return str(word1)
else:
return word + 'way'
def pig_latin(word):
if consonant(word[0]) and not consonant(word[1]):
return pig_latin_1(word)
elif consonant(word[0]) and consonant(word[1]):
word = word[2:] + word[0:2] + 'a' + 'y'
return word
else:
return word + 'way'
def pig_latin_sentence(sentence):
s1 = sentence.split(' ')
res = []
for word in s1:
res.append(pig_latin(word))
return ' '.join(res)
def test_pig_latin():
words = ['pig', 'banana', 'trash', 'happy', 'duck', 'glove', 'eat', 'omelet', 'are']
for word in words:
print(word, "->", pig_latin(word))
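# Expected output of test_pig_latin(), derived from the rules above:
#   pig -> igpay        banana -> ananabay    trash -> ashtray
#   happy -> appyhay    duck -> uckday        glove -> oveglay
#   eat -> eatway       omelet -> omeletway   are -> areway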
test_pig_latin()
print( pig_latin_sentence("I am talking in pig Latin")) | mit | -4,320,447,383,732,179,500 | 25.225 | 88 | 0.528626 | false |
DataDog/integrations-core | twistlock/tests/test_utils.py | 1 | 2177 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from datadog_checks.twistlock.utils import normalize_api_data_inplace
def test_normalize_api_data_inplace():
data = [
{
"scanTime": "2019-02-14T16:14:15.843Z",
'info': {
'id': 'sha256:08c927f8524a28fb8b76f369a89b2570eb6b92ba5b758dc1a87c6cf5256bf0cc',
'cveVulnerabilities': [{"text": "", "id": 46, "severity": "low"}],
'data': {
"binaries": [
{
"name": "bash",
"path": "/bin/bash",
"md5": "ac56f4b8fac5739ccdb45777d313becf",
"cveCount": 104,
"layerTime": 0,
},
]
},
'complianceVulnerabilities': [],
"complianceVulnerabilitiesCnt": 1,
"cveVulnerabilitiesCnt": 64,
"cveVulnerabilityDistribution": 64,
"pkgDistro": "debian",
"pkgDistroRelease": "stretch",
"version": "18.11.103",
},
},
]
expected_data = [
{
"scanTime": "2019-02-14T16:14:15.843Z",
'id': 'sha256:08c927f8524a28fb8b76f369a89b2570eb6b92ba5b758dc1a87c6cf5256bf0cc',
'vulnerabilities': [{"text": "", "id": 46, "severity": "low"}],
"binaries": [
{
"name": "bash",
"path": "/bin/bash",
"md5": "ac56f4b8fac5739ccdb45777d313becf",
"cveCount": 104,
"layerTime": 0,
},
],
'complianceIssues': [],
'complianceIssuesCount': 1,
"vulnerabilitiesCount": 64,
"vulnerabilityDistribution": 64,
"osDistro": "debian",
"osDistroRelease": "stretch",
"scanVersion": "18.11.103",
},
]
normalize_api_data_inplace(data)
assert expected_data == data
| bsd-3-clause | -529,899,774,556,861,500 | 33.555556 | 96 | 0.451539 | false |
pkgpl/IPythonProcessing | pkprocess/pkapp.py | 1 | 6022 | import numpy as np
from numba import jit
import scipy.signal
import scipy.interpolate
from .pkbase import *
@jit
def triang(L):
# generate triangle
w=np.zeros(L)
if L%2==0: # even L
for i in range(int(L/2)):
n=i+1
w[i]=(2.*n-1.)/L
for i in range(int(L/2),L):
n=i+1
w[i]=2.-(2.*n-1.)/L
else: # odd L
for i in range(int((L+1)/2)):
n=i+1
w[i]=2.*n/(L+1.)
for i in range(int((L+1)/2),L):
n=i+1
w[i]=2.-2.*n/(L+1.)
return w
@jit
def gain(self,tpow=0,epow=0,agc=False,agc_gate=0.5,norm="rms"):
# Apply gain function
# tpow=0: t**tpow
# epow=0: exp(epow*t)
# agc=False: use automatic gain control (ignore tpow & epow)
# agc_gate: agc window size [seconds]
# norm='rms': normalize agc result by 'rms' or 'amplitude'
# output: gained SeismicTrace
trace = np.zeros_like(self.data)
data = self.data
ntr=get_ntr(self)
ns=get_ns(self)
dt=get_dt(self)
if not agc:
t = np.arange(ns)*dt
t = t**tpow * np.exp(t*epow)
for itr in range(ntr):
trace[itr,:] = data[itr,:]*t
else: # agc gain
L=agc_gate/dt+1
L=int(np.floor(L/2))
h=triang(2*L+1)
for k in range(ntr):
e=data[k,:]**2
rms=np.sqrt(np.convolve(e,h,'same'))
epsi=1.e-10*np.max(rms)
if epsi==0.: continue
op=rms/(rms**2+epsi)
trace[k,:]=data[k,:]*op
if norm=='amplitude': # normalize by amplitude
trace[k,:]/=np.max(np.abs(trace[k,:]))
elif norm=='rms':
trace[k,:]/=np.sqrt(np.sum(trace[k,:]**2)/ns)
out=SeismicTrace(self.header,trace,self.logs(),self.nmo_picks)
out.add_log("gain: tpow=%s epow=%s agc=%s agc_gate=%s norm=%s"%(tpow,epow,agc,agc_gate,norm))
return out
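# --- illustrative usage sketch (not part of the original module) ---
# `sd` is assumed to be a SeismicTrace loaded elsewhere with the pkbase helpers.
#
#     gained = gain(sd, tpow=2)                  # t**2 geometric-spreading style gain
#     agced  = gain(sd, agc=True, agc_gate=0.5)  # 0.5 s AGC window, rms-normalized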
@jit
def bpfilter(self,cut_off):
# Band-pass filter
# cut_off: [min.freq, max.freq]: frequency range to pass
# output: band-pass filtered SeismicTrace
dt=get_dt(self)
nyq=0.5/dt
b,a=scipy.signal.butter(5,np.array(cut_off)/nyq,btype='band')
#w,h=scipy.signal.freqz(b,a)
trace=scipy.signal.lfilter(b,a,self.data,axis=1)
out=SeismicTrace(self.header,trace,self.logs(),self.nmo_picks)
out.add_log("bpfilter: %s"%cut_off)
return out
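# --- illustrative usage sketch (not part of the original module) ---
#     filtered = bpfilter(sd, [10., 60.])   # keep roughly 10-60 Hz (dt in seconds)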
def stack(self):
# Stack NMO-corrected CMP gathers
# output: stacked SeismicTrace
cmps=get_key(self,'cdp')
cmpu=get_key_unique(self,'cdp')
ns=get_ns(self)
dt=get_dt(self)
ncdp=len(cmpu)
stacked=np.zeros((ncdp,ns))
#for i,icmp in enumerate(cmpu):
# su1=window(self,'cdp',icmp)
# stacked[i,:]=np.sum(su1.data,axis=0)
for i,su1 in enumerate(trace_split(self,'cdp')):
stacked[i,:]=np.sum(su1.data,axis=0)
head=np.zeros(stacked.shape[0],dtype=SU_HEADER_DTYPE)
head['ns']=np.ones(stacked.shape[0],dtype=np.int32)*ns
head['dt']=np.ones(stacked.shape[0],dtype=np.int32)*dt*1000000
head['cdp']=cmpu
fold_num = np.array([sum(icmp==cmps) for icmp in cmpu])
head['shortpad']=fold_num
out=SeismicTrace(head,stacked,self.logs(),self.nmo_picks)
out.add_log('stack')
return out
@jit
def stolt_mig(self,v,dx):
# Stolt migration of CMP stacked data
# v: constant velocity
# dx: CMP interval
# output: migrated SeismicTrace
# python port of ezfkmig from http://www.biomecardio.com
Dstacked=self.data.T
nt,ncdp=Dstacked.shape
dt=get_dt(self)
num_f_pts=nt
num_pts=num_f_pts
U_w_kx=np.fft.fftshift(np.fft.fft2(Dstacked,(num_f_pts,num_pts)))
# linear interpolation
omega=2.*np.pi*np.linspace(-0.5,0.5,num_f_pts)/dt
kx=2.*np.pi*np.linspace(-0.5,0.5,num_pts)/dx
vv=v/np.sqrt(2.)
kz=vv*np.sign(omega)*np.sqrt(kx**2+omega**2/vv**2)
func=scipy.interpolate.interp2d(omega,kx,np.real(U_w_kx))
ifunc=scipy.interpolate.interp2d(omega,kx,np.imag(U_w_kx))
U_kz_kx=func(kz,kx)+ifunc(kz,kx)*1.0j
Dmigrated=np.real(np.fft.ifft2(np.fft.ifftshift(U_kz_kx)))[:,:ncdp]
out=SeismicTrace(self.header,Dmigrated.T,self.logs(),self.nmo_picks)
out.add_log('stold_mig: v=%s dx=%s'%(v,dx))
return out
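# --- illustrative usage sketch (not part of the original module) ---
# A typical post-stack flow with invented velocity and CMP spacing:
#
#     stacked  = stack(nmo_corrected_gathers)
#     migrated = stolt_mig(stacked, v=2000., dx=12.5)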
@jit
def kirchhoff1(image,gather,times,isx,igx,dt,tdelay):
nx=image.shape[0]
nz=image.shape[1]
ntr=gather.shape[0]
nt=gather.shape[1]
#cdef int ix,iz,itr,it
#cdef double ts,tg,amp
for itr in range(ntr):
for ix in range(nx):
for iz in range(nz):
ts=times[isx,ix,iz]
tg=times[igx[itr],ix,iz]
it=int((ts+tg+tdelay)/dt)
if it<nt:
amp=gather[itr,it]
image[ix,iz]+=amp
return image
@jit
def kirchhoff(sd,h,times,tdelay):
nx,nz=times[0].shape
image=np.zeros((nx,nz))
nt=get_ns(sd)
dt=get_dt(sd)
h_in_meter=h*1000.
gathers=trace_split(sd,"sx")
nshot=len(gathers)
for ishot,gather in enumerate(gathers):
if ishot %10 ==0:
print(ishot,nshot)
sx=get_key(gather,"sx")[0]
gx=np.array(get_key(gather,"gx"))
isx=int(sx/h_in_meter)
igx=(gx/h_in_meter).astype(np.int32)
image=kirchhoff1(image,gather.data,times,isx,igx,dt,tdelay)
return image
@jit
def moving_average2d(vel,r1,r2):
n1,n2=vel.shape
svel=np.empty_like(vel)
for i in range(n1):
for j in range(n2):
svel[i,j]=np.average(vel[max(0,i-r1):min(i+r1,n1),max(0,j-r2):min(j+r2,n2)])
return svel
@jit
def zdiff2(img):
dimg=np.zeros_like(img)
nz=img.shape[1]
for iz in range(1,nz-1):
dimg[:,iz]=img[:,iz-1]-2.*img[:,iz]+img[:,iz+1]
return dimg
@jit
def rmsvel(sd):
dt=get_dt(sd)
ns=get_ns(sd)
at=np.array(range(ns))*dt
dic=sd.nmo_picks
if len(dic)==0:
print("Please run this after velocity analysis!!")
return
ncmp=len(dic.keys())
v1=np.empty((ns,ncmp))
cmpnums=np.sort(dic.keys())
for icmp,cmpnum in enumerate(cmpnums):
vt=dic[cmpnum]
v=vt[0]
t=vt[1]
vinterp=np.interp(at,t,v)
v1[:,icmp]=vinterp
cmpmin=cmpnums.min()
cmpmax=cmpnums.max()
cmps=get_key_unique(sd,'cdp')
cmprange=[cmpn for cmpn in cmps if cmpmin<=cmpn and cmpn<=cmpmax]
vrms=np.empty((ns,len(cmprange)))
for it in range(ns):
vrms[it,:]=np.interp(cmprange,cmpnums,v1[it,:])
return vrms
@jit
def intervalvel(sd):
vrms=rmsvel(sd)
vmin=vrms.min()
vmax=vrms.max()
vrms=moving_average2d(vrms,50,20)
dt=get_dt(sd)
ns=get_ns(sd)
at=np.array(range(ns))*dt
vint=np.empty_like(vrms)
vint[0,:]=vrms[0,:]
for it in range(1,ns):
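        # Dix equation: interval velocity from two successive RMS velocities
        # and their two-way times.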
        vint[it,:]=np.sqrt((vrms[it,:]**2*at[it]-vrms[it-1,:]**2*at[it-1])/(at[it]-at[it-1]))
return np.clip(vint,vmin,vmax)
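# Usage sketch (assumptions noted inline): `sd` stands in for a SeismicTrace of
# NMO-corrected CMP gathers produced elsewhere with this module; the velocity
# and CMP spacing below are placeholder values, not recommendations.
def _example_stack_and_migrate(sd, v=2000.0, dx=12.5):
    stacked = sd.stack()                 # sum each NMO-corrected CMP gather
    migrated = stacked.stolt_mig(v, dx)  # constant-velocity Stolt migration
    return migrated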
| gpl-3.0 | -7,563,901,515,248,345,000 | 24.302521 | 94 | 0.661574 | false |
Forage/Gramps | gramps/webapp/grampsdb/view/note.py | 1 | 6003 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Douglas S. Blank <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
""" Views for Person, Name, and Surname """
## Gramps Modules
from webapp.utils import _, boolean, update_last_changed, StyledNoteFormatter, parse_styled_text, build_search
from webapp.grampsdb.models import Note
from webapp.grampsdb.forms import *
from webapp.libdjango import DjangoInterface
from webapp.dbdjango import DbDjango
## Django Modules
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import Context, RequestContext
## Globals
dji = DjangoInterface()
db = DbDjango()
snf = StyledNoteFormatter(db)
# add a note to a person:
# /note/add/person/c51759195496de06da3ca5ba2c1
def process_note_on_name(request, action, handle, order):
# add, edit, delete
raise Exception("testing")
def process_note(request, context, handle, act, add_to=None): # view, edit, save
"""
    Process act on note. Can return a redirect.
"""
context["tview"] = _("Note")
context["tviews"] = _("Notes")
context["action"] = "view"
view_template = "view_note_detail.html"
if handle == "add":
act = "add"
if "action" in request.POST:
act = request.POST.get("action")
# Handle: edit, view, add, create, save, delete, share, save-share
if act == "share":
item, handle = add_to
context["pickform"] = PickForm("Pick note",
Note,
(),
request.POST)
context["object_handle"] = handle
context["object_type"] = item
return render_to_response("pick.html", context)
elif act == "save-share":
item, handle = add_to
pickform = PickForm("Pick note",
Note,
(),
request.POST)
if pickform.data["picklist"]:
parent_model = dji.get_model(item) # what model?
parent_obj = parent_model.objects.get(handle=handle) # to add
ref_handle = pickform.data["picklist"]
ref_obj = Note.objects.get(handle=ref_handle)
dji.add_note_ref(parent_obj, ref_obj)
dji.rebuild_cache(parent_obj) # rebuild cache
return redirect("/%s/%s%s#tab-notes" % (item, handle, build_search(request)))
else:
context["pickform"] = pickform
context["object_handle"] = handle
context["object_type"] = item
return render_to_response("pick.html", context)
elif act == "add":
note = Note(gramps_id=dji.get_next_id(Note, "N"))
notetext = ""
noteform = NoteForm(instance=note, initial={"notetext": notetext})
noteform.model = note
elif act in ["view", "edit"]:
note = Note.objects.get(handle=handle)
genlibnote = db.get_note_from_handle(note.handle)
notetext = snf.format(genlibnote)
noteform = NoteForm(instance=note, initial={"notetext": notetext})
noteform.model = note
elif act == "save":
note = Note.objects.get(handle=handle)
notetext = ""
noteform = NoteForm(request.POST, instance=note, initial={"notetext": notetext})
noteform.model = note
if noteform.is_valid():
update_last_changed(note, request.user.username)
notedata = parse_styled_text(noteform.data["notetext"])
note.text = notedata[0]
note = noteform.save()
dji.save_note_markup(note, notedata[1])
dji.rebuild_cache(note)
notetext = noteform.data["notetext"]
act = "view"
else:
notetext = noteform.data["notetext"]
act = "edit"
elif act == "create":
note = Note(handle=create_id())
notetext = ""
noteform = NoteForm(request.POST, instance=note, initial={"notetext": notetext})
noteform.model = note
if noteform.is_valid():
update_last_changed(note, request.user.username)
notedata = parse_styled_text(noteform.data["notetext"])
note.text = notedata[0]
note = noteform.save()
dji.save_note_markup(note, notedata[1])
dji.rebuild_cache(note)
if add_to:
item, handle = add_to
model = dji.get_model(item)
obj = model.objects.get(handle=handle)
dji.add_note_ref(obj, note)
dji.rebuild_cache(obj)
return redirect("/%s/%s#tab-notes" % (item, handle))
notetext = noteform.data["notetext"]
act = "view"
else:
notetext = noteform.data["notetext"]
act = "add"
elif act == "delete":
# FIXME: delete markup too for this note
note = Note.objects.get(handle=handle)
note.delete()
return redirect("/note/")
else:
raise Exception("Unhandled act: '%s'" % act)
context["noteform"] = noteform
context["object"] = note
context["notetext"] = notetext
context["note"] = note
context["action"] = act
return render_to_response(view_template, context)
| gpl-2.0 | -3,609,075,325,010,903,600 | 37.480769 | 110 | 0.596202 | false |
unicef/un-partner-portal | backend/unpp_api/apps/common/models.py | 1 | 3927 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from django.conf import settings
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator, FileExtensionValidator
from imagekit.models import ImageSpecField
from model_utils.models import TimeStampedModel
from pilkit.processors import ResizeToFill
from common.countries import COUNTRIES_ALPHA2_CODE, COUNTRIES_ALPHA2_CODE_DICT
class PointQuerySet(models.QuerySet):
def get_point(self, lat=None, lon=None, admin_level_1=None):
admin_level_1, _ = AdminLevel1.objects.get_or_create(
name=admin_level_1.get('name'),
country_code=admin_level_1['country_code'],
)
point, _ = self.get_or_create(lat=lat, lon=lon, admin_level_1=admin_level_1)
return point
class AdminLevel1(models.Model):
"""
Admin level 1 - is like California in USA or Mazowieckie in Poland
"""
name = models.CharField(max_length=255, null=True, blank=True)
country_code = models.CharField(max_length=3, choices=COUNTRIES_ALPHA2_CODE)
class Meta:
ordering = ['id']
unique_together = ('name', 'country_code')
def __str__(self):
return f"[{self.country_name}] {self.name}"
@property
def country_name(self):
return COUNTRIES_ALPHA2_CODE_DICT[self.country_code]
class Point(models.Model):
lat = models.DecimalField(
verbose_name='Latitude',
null=True,
blank=True,
max_digits=8,
decimal_places=5,
validators=[MinValueValidator(Decimal(-90)), MaxValueValidator(Decimal(90))]
)
lon = models.DecimalField(
verbose_name='Longitude',
null=True,
blank=True,
max_digits=8,
decimal_places=5,
validators=[MinValueValidator(Decimal(-180)), MaxValueValidator(Decimal(180))]
)
admin_level_1 = models.ForeignKey(AdminLevel1, related_name="points")
objects = PointQuerySet.as_manager()
class Meta:
ordering = ['id']
def __str__(self):
return "Point <pk:{}>".format(self.id)
class Sector(models.Model):
name = models.CharField(max_length=255)
class Meta:
ordering = ['id']
def __str__(self):
return "Sector: {} <pk:{}>".format(self.name, self.id)
class Specialization(models.Model):
name = models.CharField(max_length=255)
category = models.ForeignKey(Sector, related_name="specializations")
class Meta:
ordering = ['id']
def __str__(self):
return f'<{self.pk}> {self.category.name}: {self.name}'
class CommonFile(TimeStampedModel):
file_field = models.FileField(validators=(
FileExtensionValidator(settings.ALLOWED_EXTENSIONS),
))
# Only applicable for image files
__thumbnail = ImageSpecField(
source='file_field',
processors=[
ResizeToFill(150, 75)
],
format='JPEG',
options={
'quality': 80
},
)
class Meta:
ordering = ['id']
def __str__(self):
return f"CommonFile [{self.pk}] {self.file_field}"
@property
def thumbnail_url(self):
"""
Done this way to fail gracefully when trying to get thumbnail for non-image file
"""
try:
return self.__thumbnail.url
except OSError:
return None
@property
def has_existing_reference(self):
"""
Returns True if this file is referenced from at least one other object
"""
for attr_name in dir(self):
if attr_name == CommonFile.has_existing_reference.fget.__name__ or not hasattr(self, attr_name):
continue
attribute = getattr(self, attr_name)
if callable(getattr(attribute, 'exists', None)) and attribute.exists():
return True
return False
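def _example_get_point():
    """Sketch only - assumes a configured Django project with migrations applied;
    the coordinates and admin area below are made-up sample values."""
    admin_area = {"name": "California", "country_code": "US"}
    return Point.objects.get_point(
        lat=Decimal("34.05240"),
        lon=Decimal("-118.24368"),
        admin_level_1=admin_area,
    )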
| apache-2.0 | -2,077,369,729,848,358,400 | 27.251799 | 108 | 0.624141 | false |
analysiscenter/dataset | batchflow/tests/metrics_test.py | 1 | 19428 | """Test SegmentationMetricsByPixels and SegmentationMetricsByInstances classes.
Also act as tests for ClassificationMetrics, since it's identical to
SegmentationMetricsByPixels.
Structurally, the file consists of four classes, which respectively check:
- basic assembly process (shapes compatibility, confusion matrix corectness);
- evaluated result shape of SegmemtationMetricsByPixels for all metrics;
- similarly, evaluated result contents;
- so-called "subsampling" functions of SegmentationMetricsByInstances.
Test data is pre-defined, it's shape and contents were chosen for reasons
of balance between visual simplicity and test coverage diversity.
"""
# pylint: disable=import-error, no-name-in-module, invalid-name, protected-access
import numpy as np
import pytest
from batchflow.models.metrics import SegmentationMetricsByPixels, SegmentationMetricsByInstances
# Accuracy is not included because it can't process 'multiclass' parameter
# and therefore is being tested individually.
METRICS_LIST = ['tpr', 'fpr', 'fnr', 'tnr', 'prv', 'ppv', 'fdr', 'for', 'npv', 'plr', 'nlr', 'dor', 'f1s', 'jac']
BATCH_SIZE = 2
IMAGE_SIZE = 2
NUM_CLASSES = 3
# Set targets.
TARGETS = np.array([[[0, 1],
[2, 2]],
[[0, 0],
[1, 1]]])
# Set predictions as 'labels'.
LABELS = np.array([[[0, 1],
[1, 0]],
[[2, 0],
[1, 1]]])
# Onehots are basically like probas, just with all 0 and a single 1.
PROBA = np.eye(NUM_CLASSES)[LABELS]
# Logit function gives ±infs on degenerate case of 0s and 1s but works fine for sigmoid function.
LOGITS = np.where(PROBA > 0.5, np.inf, -np.inf)
"""First param stands for predictions variable, second — for predictions type, third — for axis with class info.
Transposed predictions correspond to 'channels_first' data format."""
PREDICTIONS = [(LABELS, 'labels', None),
(PROBA, 'proba', 3),
(LOGITS, 'logits', 3),
(np.transpose(PROBA, (3, 0, 1, 2)), 'proba', 0),
(np.transpose(LOGITS, (3, 0, 1, 2)), 'logits', 0)]
BAD_PREDICTIONS = [(LABELS[0], 'labels', None), # predictions ndim is less then targets' for labels
(PROBA, 'proba', None), # axis is None for multiclass proba
(LOGITS, 'logits', None)] # axis is None for multiclass logits
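# Minimal usage sketch (not collected by pytest): evaluating the metrics class
# directly on the pre-defined data, the same way the tests below do.
def _example_direct_evaluation():
    metric = SegmentationMetricsByPixels(TARGETS, LABELS, 'labels', NUM_CLASSES, None)
    per_class_tpr = metric.evaluate(metrics='tpr', agg=None, multiclass=None)
    mean_accuracy = metric.evaluate(metrics='accuracy', agg='mean')
    return per_class_tpr, mean_accuracy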
class TestAssembly:
"""Check metrics creation process."""
@pytest.mark.parametrize('SegmentationMetrics', [SegmentationMetricsByPixels, SegmentationMetricsByInstances])
@pytest.mark.parametrize('predictions, fmt, axis', BAD_PREDICTIONS)
def test_incompatibility_processing(self, SegmentationMetrics, predictions, fmt, axis):
"""Create metrics class with inconsistent targets and predictions
(different ndim, no axis when it's required), expecting ValueError.
Parameters
----------
SegmentationMetrics: SegmentationsMetricsByPixels or
SegmentationsMetricsByInstances
Metrics class
predictions : np.array
Variable name containing predictions' array of desired format
fmt : string
Denotes predictions format
axis : None or int
A class axis
"""
with pytest.raises(ValueError):
SegmentationMetrics(TARGETS, predictions, fmt, NUM_CLASSES, axis)
params = [(SegmentationMetricsByPixels, np.array([[[1, 0, 1],
[0, 1, 1],
[0, 0, 0]],
[[1, 0, 0],
[0, 2, 0],
[1, 0, 0]]])),
(SegmentationMetricsByInstances, np.array([[[[0, 0],
[1, 1]],
[[0, 1],
[0, 0]]],
[[[0, 0],
[0, 1]],
[[0, 0],
[1, 0]]]]))]
@pytest.mark.parametrize('SegmentationMetrics, exp_matrix', params)
@pytest.mark.parametrize('predictions, fmt, axis', PREDICTIONS)
def test_confusion_matrix(self, SegmentationMetrics, exp_matrix, predictions, fmt, axis):
"""Compare contents of actual confusion matrix with expected ones
for metrics class assembled with given params.
Parameters
----------
SegmentationMetrics: SegmentationsMetricsByPixels or
SegmentationsMetricsByInstances
Metrics class
exp_matrix: np.array
Expected confusion matrix
predictions : np.array
Variable name containing predictions' array of desired format
fmt : string
Denotes predictions format
axis : None or int
A class axis
"""
metric = SegmentationMetrics(TARGETS, predictions, fmt, NUM_CLASSES, axis)
res_matrix = metric._confusion_matrix
assert np.array_equal(res_matrix, exp_matrix)
class TestShape:
"""Check the shape of evaluated metrics return value for various parameters.
There is a following pattern in both tests:
    0. Each function is preceded by data for its parametrization.
1. Parametrizing decorators are applied.
2. Instance of SegmentationMetricsByPixels is being created.
3. Metric is being evaluated with given parameters.
    4. Its result's shape is compared with the expected one.
"""
# First param stands for batch aggregation, second — for multiclass one, third represents expected output shape.
params = [(None, None, lambda l: (BATCH_SIZE, NUM_CLASSES - l)),
(None, 'micro', (BATCH_SIZE,)),
(None, 'macro', (BATCH_SIZE,)),
('mean', None, lambda l: (NUM_CLASSES - l,)),
('mean', 'micro', None),
('mean', 'macro', None)]
@pytest.mark.parametrize('metric_name', METRICS_LIST)
@pytest.mark.parametrize('predictions, fmt, axis', PREDICTIONS)
@pytest.mark.parametrize('batch_agg, multi_agg, exp_shape', params)
@pytest.mark.parametrize('skip_bg', [False, True])
def test_shape(self, metric_name, predictions, fmt, axis, batch_agg, multi_agg, exp_shape, skip_bg):
"""Compare expected return value shape with actual return value shape of
metric evaluation with given params for all metrics from METRICS_LIST.
Parameters
----------
predictions : np.array
Variable name containing predictions' array of desired format
fmt : string
Denotes predictions format
axis : None or int
A class axis
batch_agg : string
Cross-batch aggregation type
multi_agg : string
            Multiclass aggregation type
exp_shape : None or tuple
Expected return value shape
skip_bg : False or True
If background class should be excluded from metrics evaluation
"""
if callable(exp_shape):
exp_shape = exp_shape(skip_bg)
metric = SegmentationMetricsByPixels(targets=TARGETS, predictions=predictions, fmt=fmt,
num_classes=NUM_CLASSES, axis=axis, skip_bg=skip_bg)
res = metric.evaluate(metrics=metric_name, agg=batch_agg, multiclass=multi_agg)
res_shape = res.shape if isinstance(res, np.ndarray) else None
assert res_shape == exp_shape
@pytest.mark.parametrize('predictions, fmt, axis', PREDICTIONS)
@pytest.mark.parametrize('batch_agg, exp_shape', [(None, (BATCH_SIZE,)), ('mean', None)])
def test_shape_accuracy(self, predictions, fmt, axis, batch_agg, exp_shape):
"""Compare expected return value shape with actual return value shape of
accuracy metric evaluation with given params.
Parameters
----------
predictions : np.array
Variable name containing predictions' array of desired format
fmt : string
Denotes predictions format
axis : None or int
A class axis
batch_agg : string
Cross-batch aggregation type
exp_shape : None or tuple
Expected return value shape
"""
metric = SegmentationMetricsByPixels(TARGETS, predictions, fmt, NUM_CLASSES, axis)
res = metric.evaluate(metrics='accuracy', agg=batch_agg)
res_shape = res.shape if isinstance(res, np.ndarray) else None
assert res_shape == exp_shape
class TestResult:
"""Check evaluated metrics return value for various parameters.
There is a following pattern in both tests:
    0. Each function is preceded by data for its parametrization.
1. Parametrizing decorators are applied.
2. Instance of SegmentationMetricsByPixels is being created.
3. Metric is being evaluated with given parameters.
    4. Its result is compared with the expected one.
"""
# First param stands for batch aggregation type, second — for multiclass one,
# third represents manually pre-calculated expected output contents for each type of metrics.
params = [(None, None, {'tpr' : np.array([1.00, 1.00, 0.00, 0.50, 1.00, 1.00]),
'fpr' : np.array([0.33, 0.33, 0.00, 0.00, 0.00, 0.25]),
'tnr' : np.array([0.66, 0.66, 1.00, 1.00, 1.00, 0.75]),
'fnr' : np.array([0.00, 0.00, 1.00, 0.50, 0.00, 0.00]),
'prv' : np.array([0.25, 0.25, 0.50, 0.50, 0.50, 0.00]),
'ppv' : np.array([0.50, 0.50, 1.00, 1.00, 1.00, 0.00]),
'fdr' : np.array([0.50, 0.50, 0.00, 0.00, 0.00, 1.00]),
'for' : np.array([0.00, 0.00, 0.50, 0.33, 0.00, 0.00]),
'npv' : np.array([1.00, 1.00, 0.50, 0.66, 1.00, 1.00]),
'plr' : np.array([3.00, 3.00, 0.00, np.inf, np.inf, 4.00]),
'nlr' : np.array([0.00, 0.00, 1.00, 0.50, 0.00, 0.00]),
'dor' : np.array([np.inf, np.inf, 0.00, np.inf, np.inf, np.inf]),
'f1s' : np.array([0.66, 0.66, 0.00, 0.66, 1.00, 0.00]),
'jac' : np.array([0.49, 0.49, 0.00, 0.49, 1.00, 0.00])}),
(None, 'micro', {'tpr' : np.array([0.50, 0.75]),
'fpr' : np.array([0.25, 0.12]),
'tnr' : np.array([0.75, 0.87]),
'fnr' : np.array([0.50, 0.25]),
'prv' : np.array([0.33, 0.33]),
'ppv' : np.array([0.50, 0.75]),
'fdr' : np.array([0.50, 0.25]),
'for' : np.array([0.25, 0.12]),
'npv' : np.array([0.75, 0.87]),
'plr' : np.array([3.00, 10.00]),
'nlr' : np.array([0.42, 0.18]),
'dor' : np.array([6.00, np.inf]),
'f1s' : np.array([0.50, 0.75]),
'jac' : np.array([0.33, 0.60])}),
(None, 'macro', {'tpr' : np.array([0.66, 0.83]),
'fpr' : np.array([0.22, 0.08]),
'tnr' : np.array([0.77, 0.91]),
'fnr' : np.array([0.33, 0.16]),
'prv' : np.array([0.33, 0.33]),
'ppv' : np.array([0.66, 0.66]),
'fdr' : np.array([0.33, 0.33]),
'for' : np.array([0.16, 0.11]),
'npv' : np.array([0.83, 0.88]),
'plr' : np.array([2.00, 4.00]),
'nlr' : np.array([0.33, 0.16]),
'dor' : np.array([0.00, np.inf]),
'f1s' : np.array([0.66, 0.74]),
'jac' : np.array([0.50, 0.58])}),
('mean', None, {'tpr' : np.array([0.75, 1.00, 0.50]),
'fpr' : np.array([0.16, 0.16, 0.12]),
'tnr' : np.array([0.83, 0.83, 0.87]),
'fnr' : np.array([0.25, 0.00, 0.50]),
'prv' : np.array([0.37, 0.37, 0.25]),
'ppv' : np.array([0.75, 0.75, 0.50]),
'fdr' : np.array([0.25, 0.25, 0.50]),
'for' : np.array([0.16, 0.00, 0.25]),
'npv' : np.array([0.83, 1.00, 0.75]),
'plr' : np.array([3.00, 3.00, 2.00]),
'nlr' : np.array([0.25, 0.00, 0.50]),
'dor' : np.array([np.inf, np.inf, 0.00]),
'f1s' : np.array([0.66, 0.83, 0.00]),
'jac' : np.array([0.50, 0.75, 0.00])}),
('mean', 'micro', {'tpr' : np.array([0.62]),
'fpr' : np.array([0.18]),
'tnr' : np.array([0.81]),
'fnr' : np.array([0.37]),
'prv' : np.array([0.33]),
'ppv' : np.array([0.62]),
'fdr' : np.array([0.37]),
'for' : np.array([0.18]),
'npv' : np.array([0.81]),
'plr' : np.array([6.50]),
'nlr' : np.array([0.30]),
'dor' : np.array([6.00]),
'f1s' : np.array([0.62]),
'jac' : np.array([0.46])}),
('mean', 'macro', {'tpr' : np.array([0.75]),
'fpr' : np.array([0.15]),
'tnr' : np.array([0.84]),
'fnr' : np.array([0.25]),
'prv' : np.array([0.33]),
'ppv' : np.array([0.66]),
'fdr' : np.array([0.33]),
'for' : np.array([0.13]),
'npv' : np.array([0.86]),
'plr' : np.array([3.00]),
'nlr' : np.array([0.25]),
'dor' : np.array([0.00]),
'f1s' : np.array([0.70]),
'jac' : np.array([0.54])})]
@pytest.mark.parametrize('predictions, fmt, axis', PREDICTIONS)
@pytest.mark.parametrize('batch_agg, multi_agg, exp_dict', params)
def test_result(self, predictions, fmt, axis, batch_agg, multi_agg, exp_dict):
"""Compare expected evaluated metrics return value with actual one
with given params for all metrics from METRICS_DICT.
Parameters
----------
predictions : np.array
Variable name containing predictions' array of desired format
fmt : string
Denotes predictions format
axis : None or int
A class axis
batch_agg : string
Cross-batch aggregation type
multi_agg : string
            Multiclass aggregation type
exp_dict : dict
Keys are metric's aliases and values are expected contents
of their evaluation results with given aggregation params
"""
metric = SegmentationMetricsByPixels(TARGETS, predictions, fmt, NUM_CLASSES, axis)
for metric_name, exp in exp_dict.items():
res = metric.evaluate(metrics=metric_name, agg=batch_agg, multiclass=multi_agg)
res = res.reshape(-1) if isinstance(res, np.ndarray) else [res]
assert np.allclose(res, exp, atol=1e-02, rtol=0), 'failed on metric {}'.format(metric_name)
@pytest.mark.parametrize('predictions, fmt, axis', PREDICTIONS)
@pytest.mark.parametrize('batch_agg, exp', [(None, np.array([0.50, 0.75])), ('mean', np.array([0.62]))])
def test_result_accuracy(self, predictions, fmt, axis, batch_agg, exp):
"""Compare expected evaluated metrics return value actual one
with given params for `accuracy` metrics.
Parameters
----------
predictions : np.array
Variable name containing predictions' array of desired format
fmt : string
Denotes predictions format
axis : None or int
A class axis
batch_agg : string
Cross-batch aggregation type
exp : np.array
Expected `accuracy` evaluation result with given aggregation params
"""
metric = SegmentationMetricsByPixels(TARGETS, predictions, fmt, NUM_CLASSES, axis)
res = metric.evaluate(metrics='accuracy', agg=batch_agg)
res = res.reshape(-1) if isinstance(res, np.ndarray) else [res]
assert np.allclose(res, exp, atol=1e-02, rtol=0), 'failed on metric {}'.format('accuracy')
class TestSubsampling:
"""Check the correctness of confusion matrix subsampling functions result
for SegmentationMetricsByInstances class (e.g. true_positive subsample,
total_population subsample). Test functions here act as an equivalent of
TestResult functions for SegmentationMetricsByInstances class, since it
differs from SegmentationMetricsByPixels in redefined subsampling functions
(and confusion matrix assembly process, which is checked in TestAssembly).
"""
params = [('true_positive', np.array([[1, 0],
[1, 0]])),
('condition_positive', np.array([[1, 1],
[1, 0]])),
('prediction_positive', np.array([[2, 0],
[1, 1]])),
('total_population', np.array([[2, 1],
[1, 1]]))]
@pytest.mark.parametrize('subsample_name, exp_subsample', params)
def test_subsampling(self, subsample_name, exp_subsample):
"""Compare expected subsample with actual one.
Parameters
----------
subsample_name: string
Name of confusion matrix subsample
exp_subsample: np.array
Expected subsample of confusion matrix
"""
metric = SegmentationMetricsByInstances(TARGETS, LABELS, 'labels', NUM_CLASSES)
res_subsample = getattr(metric, subsample_name)()
assert np.array_equal(res_subsample, exp_subsample)
def test_subsampling_true_negative(self):
"""Check if subsampling true negative raises ValueError."""
metric = SegmentationMetricsByInstances(TARGETS, LABELS, 'labels', NUM_CLASSES)
with pytest.raises(ValueError):
getattr(metric, 'true_negative')()
| apache-2.0 | -1,080,217,411,090,379,900 | 45.343675 | 116 | 0.510506 | false |
Zanzibar82/script.module.urlresolver | lib/urlresolver/plugins/exashare.py | 1 | 6489 | """
Exashare.com urlresolver XBMC Addon
Copyright (C) 2014 JUL1EN094
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib,urllib2,os,re,xbmc
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import SiteAuth
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class ExashareResolver(Plugin,UrlResolver,PluginSettings):
implements = [UrlResolver,SiteAuth,PluginSettings]
name = "exashare"
domains = [ "exashare.com" ]
profile_path = common.profile_path
cookie_file = os.path.join(profile_path,'%s.cookies'%name)
USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0'
def __init__(self):
p=self.get_setting('priority') or 100
self.priority=int(p)
self.net=Net()
#UrlResolver methods
def get_media_url(self, host, media_id):
base_url = 'http://www.' + host + '.com/' + media_id
headers = {'User-Agent': self.USER_AGENT, 'Referer': 'http://www.' + host + '.com/'}
try: html = self.net.http_GET(base_url).content
except: html = self.net.http_GET(base_url, headers=headers).content
if re.search("""File Not Found""", html):
raise UrlResolver.ResolverError('File not found or removed')
POST_Url = re.findall('form method="POST" action=\'(.*)\'',html)[0]
POST_Selected = re.findall('form method="POST" action=(.*)</Form>',html,re.DOTALL)[0]
POST_Data = {}
POST_Data['op'] = re.findall('input type="hidden" name="op" value="(.*)"',POST_Selected)[0]
POST_Data['usr_login'] = re.findall('input type="hidden" name="usr_login" value="(.*)"',POST_Selected)[0]
POST_Data['id'] = re.findall('input type="hidden" name="id" value="(.*)"',POST_Selected)[0]
POST_Data['fname'] = re.findall('input type="hidden" name="fname" value="(.*)"',POST_Selected)[0]
POST_Data['referer'] = re.findall('input type="hidden" name="referer" value="(.*)"',POST_Selected)[0]
POST_Data['hash'] = re.findall('input type="hidden" name="hash" value="(.*)"',POST_Selected)[0]
POST_Data['imhuman'] = 'Proceed to video'
try : html2 = self.net.http_POST(POST_Url,POST_Data).content
except : html2 = self.net.http_POST(POST_Url,POST_Data,headers=headers).content
stream_url = re.findall('file:\s*"([^"]+)"', html2)[0]
if self.get_setting('login') == 'true':
cookies = {}
for cookie in self.net._cj:
cookies[cookie.name] = cookie.value
if len(cookies) > 0:
stream_url = stream_url + '|' + urllib.urlencode({'Cookie': urllib.urlencode(cookies)})
common.addon.log('stream_url : ' + stream_url)
xbmc.sleep(7000)
return stream_url
def get_url(self,host,media_id):
return 'http://www.exashare.com/%s' % media_id
def get_host_and_id(self,url):
r=re.search('http://(?:www.)?(.+?).com/(?:embed\-)?([0-9A-Za-z_]+)(?:\-[0-9]+x[0-9]+.html)?',url)
if r:
ls=r.groups()
return ls
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled')=='false' or self.get_setting('login')=='false':
return False
return re.match('http://(?:www.)?exashare.com/(?:embed\-)?[0-9A-Za-z]+(?:\-[0-9]+x[0-9]+.html)?',url) or 'exashare.com' in host
#SiteAuth methods
def needLogin(self):
url='http://www.exashare.com/?op=my_account'
if not os.path.exists(self.cookie_file):
common.addon.log_debug('needLogin returning True')
return True
self.net.set_cookies(self.cookie_file)
source=self.net.http_GET(url).content
if re.search("""Your username is for logging in and cannot be changed""",source):
common.addon.log_debug('needLogin returning False')
return False
else:
common.addon.log_debug('needLogin returning True')
return True
def login(self):
if (self.get_setting('login')=='true'):
if self.needLogin():
common.addon.log('logging in exashare')
url='http://www.exashare.com/'
data={'login':self.get_setting('username'),'password':self.get_setting('password'),'op':'login','redirect':'/login.html'}
headers={'User-Agent':self.USER_AGENT,'Referer':url}
try: source=self.net.http_POST(url,data).content
except: source=self.net.http_POST(url,data,headers=headers).content
if re.search('Your username is for logging in and cannot be changed',source):
common.addon.log('logged in exashare')
self.net.save_cookies(self.cookie_file)
self.net.set_cookies(self.cookie_file)
return True
else:
common.addon.log('error logging in exashare')
return False
else:
if os.path.exists(self.cookie_file): os.remove(self.cookie_file)
return False
#PluginSettings methods
def get_settings_xml(self):
xml = PluginSettings.get_settings_xml(self)
xml += '<setting id="ExashareResolver_login" '
xml += 'type="bool" label="Login" default="false"/>\n'
xml += '<setting id="ExashareResolver_username" enable="eq(-1,true)" '
xml += 'type="text" label=" username" default=""/>\n'
xml += '<setting id="ExashareResolver_password" enable="eq(-2,true)" '
xml += 'type="text" label=" password" option="hidden" default=""/>\n'
return xml
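# Usage sketch: outside of the urlresolver framework the plugin could be driven
# roughly like this (assumes valid login settings, network access and the XBMC
# runtime that this module imports).
def _example_resolve(url):
    resolver = ExashareResolver()
    host_and_id = resolver.get_host_and_id(url)
    if not host_and_id:
        return None
    host, media_id = host_and_id
    resolver.login()
    return resolver.get_media_url(host, media_id)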
| gpl-2.0 | -6,906,511,386,082,544,000 | 48.159091 | 137 | 0.601788 | false |
cc-it/odoo_mod | currency_rate_update/services/update_service_CA_BOC.py | 1 | 4006 | # -*- coding: utf-8 -*-
# © 2009 Camptocamp
# © 2014 Daniel Dico
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from .currency_getter_interface import CurrencyGetterInterface
from openerp import _
from openerp.exceptions import except_orm
import logging
_logger = logging.getLogger(__name__)
class CA_BOCGetter(CurrencyGetterInterface):
"""Implementation of Curreny_getter_factory interface
for Bank of Canada RSS service
"""
# Bank of Canada is using RSS-CB
# http://www.cbwiki.net/wiki/index.php/Specification_1.1
# This RSS format is used by other national banks
# (Thailand, Malaysia, Mexico...)
code = 'CA_BOC'
name = 'Bank of Canada - noon rates'
supported_currency_array = [
"AED", "ANG", "ARS", "AUD", "BOC", "BRL", "BSD", "CHF", "CLP", "CNY",
"COP", "CZK", "DKK", "EUR", "FJD", "GBP", "GHS", "GTQ", "HKD", "HNL",
"HRK", "HUF", "IDR", "ILS", "INR", "ISK", "JMD", "JPY", "KRW", "LKR",
"MAD", "MMK", "MXN", "MYR", "NOK", "NZD", "PAB", "PEN", "PHP", "PKR",
"PLN", "RON", "RSD", "RUB", "SEK", "SGD", "THB", "TND", "TRY", "TTD",
"TWD", "USD", "VEF", "VND", "XAF", "XCD", "XPF", "ZAR"]
def get_updated_currency(self, currency_array, main_currency,
max_delta_days):
"""implementation of abstract method of Curreny_getter_interface"""
# as of Jan 2014 BOC is publishing noon rates for about 60 currencies
url = ('http://www.bankofcanada.ca/stats/assets/'
'rates_rss/noon/en_%s.xml')
# closing rates are available as well (please note there are only 12
# currencies reported):
# http://www.bankofcanada.ca/stats/assets/rates_rss/closing/en_%s.xml
# We do not want to update the main currency
if main_currency in currency_array:
currency_array.remove(main_currency)
import feedparser
import pytz
from dateutil import parser
for curr in currency_array:
_logger.debug("BOC currency rate service : connecting...")
dom = feedparser.parse(url % curr)
self.validate_cur(curr)
# check if BOC service is running
if dom.bozo and dom.status != 404:
_logger.error("Bank of Canada - service is down - try again\
later...")
# check if BOC sent a valid response for this currency
if dom.status != 200:
_logger.error("Exchange data for %s is not reported by Bank\
of Canada." % curr)
raise except_orm(_('Error !'), _('Exchange data for %s is not '
'reported by Bank of Canada.'
% str(curr)))
_logger.debug("BOC sent a valid RSS file for: " + curr)
# check for valid exchange data
if (dom.entries[0].cb_basecurrency == main_currency) and \
(dom.entries[0].cb_targetcurrency == curr):
rate = dom.entries[0].cb_exchangerate.split('\n', 1)[0]
rate_date_datetime = parser.parse(dom.entries[0].updated)\
.astimezone(pytz.utc).replace(tzinfo=None)
self.check_rate_date(rate_date_datetime, max_delta_days)
self.updated_currency[curr] = rate
_logger.debug("BOC Rate retrieved : %s = %s %s" %
(main_currency, rate, curr))
else:
_logger.error(
"Exchange data format error for Bank of Canada -"
"%s. Please check provider data format "
"and/or source code." % curr)
raise except_orm(_('Error !'),
_('Exchange data format error for '
'Bank of Canada - %s !' % str(curr)))
return self.updated_currency, self.log_info
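# Usage sketch: the getter is normally driven by the currency_rate_update
# scheduler; standalone use would look roughly like this (network access and
# the feedparser/pytz/dateutil dependencies are assumed).
def _example_fetch_cad_rates():
    getter = CA_BOCGetter()
    rates, log_info = getter.get_updated_currency(["USD", "EUR"], "CAD", 5)
    return rates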
| mit | -7,019,638,201,694,220,000 | 40.708333 | 79 | 0.539461 | false |
MillerCMBLabUSC/lab_analysis | apps/4f_model/OldCode/IPCalc.py | 1 | 7846 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 8 10:51:21 2017
@author: jlashner
"""
from pylab import *
import tmm
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as intg
"""
Constants and System Parameters
"""
#speed of light [m/s]
c =2.99792458 * 10**8
GHz = 10 ** 9
"""
Helpful functions to calculate IP / AR coating
"""
def getIP(n, d, freq, theta):
"""
Gets IP of optical elements
Parameters
----------
n : list
Index of refraction for each element in the stack
d : list
Thickness for each element in the stack
freq : float [Hz]
Frequency
theta:
Incident angle
"""
lam_vac = c / freq * 1000.
# lam_vac = 2.0
s = tmm.coh_tmm('s',n, d,theta,lam_vac)
p = tmm.coh_tmm('p',n, d,theta,lam_vac)
return -(s['T']-p['T'])/2
def getPolAbs(n, d, freq, theta):
"""
Gets Polarized Absorption of optical elements
Parameters
----------
n : list
Index of refraction for each element in the stack
d : list
Thickness for each element in the stack
freq : float [Hz]
Frequency
theta:
Incident angle
"""
lam_vac = c / freq * 1000.
s = tmm.coh_tmm('s',n, d,theta,lam_vac)
p = tmm.coh_tmm('p',n, d,theta,lam_vac)
sA = 1 - s['T'] - s['R']
pA = 1 - p['T'] - p['R']
return -((sA - pA)/2)
def getBandAverage(n, d, freq, fbw, theta, divisions=100):
"""
Gets band averaged IP of stack
Parameters
----------
n : list
Index of refraction for each element in the stack
d : list
Thickness for each element in the stack
freq :
Band center
fbw:
Fractional bandwidth
theta:
Incident angle
"""
flo = freq * (1. - .5 * fbw)
fhi = freq * (1. + .5 * fbw)
fs = np.linspace(flo, fhi, divisions)
ips = np.array(map(lambda x : getIP(n,d,x, theta), fs))
return trapz(ips, fs) / (fhi - flo)
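# Worked example: for the 93 GHz band with fractional bandwidth 0.376 the
# integration runs from flo = 93*(1 - 0.188) ~ 75.5 GHz up to
# fhi = 93*(1 + 0.188) ~ 110.5 GHz, and the trapezoidal integral of the IP
# over that range is divided by (fhi - flo) to give the band-averaged IP.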
def getBandAverageAbs(n, d, freq, fbw, theta, divisions=100):
"""
    Gets band averaged polarized absorption of stack
Parameters
----------
n : list
Index of refraction for each element in the stack
d : list
Thickness for each element in the stack
freq :
Band center
fbw:
Fractional bandwidth
theta:
Incident angle
"""
flo = freq * (1. - .5 * fbw)
fhi = freq * (1. + .5 * fbw)
fs = np.linspace(flo, fhi, divisions)
ips = np.array(map(lambda x : getPolAbs(n,d,x, theta), fs))
return trapz(ips, fs) / (fhi - flo)
def ARCoat(n, lam0):
"""
Gets Index of refraction and thickness for AR coating
Parameters
----------
n : float
Index of refraction of element to be coated
lam0 : float
Optimized Wavelength [mm]
"""
ni= .00008
nAR = [real(n)**(1./3) + ni*1j, real(n)**(2./3) + ni * 1j]
dAR = map(lambda x : lam0 / (4.0 * real(x)), nAR)
return nAR, dAR
def ARCoatOld(n, lam0):
"""
Gets Index of refraction and thickness for AR coating
Parameters
----------
n : float
Index of refraction of element to be coated
lam0 : float
Optimized Wavelength [mm]
"""
nAR = [real(n)**(1./2)]
dAR = map(lambda x : lam0 / (4.0 * real(x)), nAR)
return nAR, dAR
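# Worked example for ARCoat: coating alumina (n ~ 3.1) optimised at a 2.5 mm
# wavelength gives layer indices 3.1**(1./3) ~ 1.46 and 3.1**(2./3) ~ 2.13,
# with quarter-wave thicknesses lam0/(4*n) of roughly 0.43 mm and 0.29 mm.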
def getWinIP(freq, fbw, theta):
"""
Gets IP for a window
Parameters
==========
freq : float [Hz]
Band center
fbw : float
Fractional Bandwidth
theta : float [rad]
Incident angle
"""
n = 1.5 + .0001j
nARwin, dARwin = ARCoat(n, 2.5)
n_window = [1.0] + nARwin + [n] + nARwin[::-1] + [1.0]
d_window = [Inf] + dARwin + [5.0] + dARwin[::-1] + [Inf]
return (getBandAverage(n_window, d_window, freq, fbw, theta), \
getBandAverageAbs(n_window, d_window, freq, fbw, theta))
def getFilterIP(freq, fbw, theta):
"""
    Gets IP for an alumina filter
Parameters
==========
freq : float [Hz]
Band center
fbw : float
Fractional Bandwidth
theta : float [rad]
Incident angle
"""
n = 3.1 + .00008j
nAR, dAR = ARCoat(n, 2.5)
n_AluminaF = [1.0] + nAR + [n] + nAR[::-1] + [1.0]
d_AluminaF = [Inf] + dAR + [2.0] + dAR[::-1] + [Inf]
return (getBandAverage(n_AluminaF, d_AluminaF, freq, fbw, theta), \
getBandAverageAbs(n_AluminaF, d_AluminaF, freq, fbw, theta))
if __name__ == "__main__":
bc = np.array([93.0 * GHz,145. * GHz]) # Band center [Hz]
fbw = np.array([.376, .276]) #Fractional bandwidth
flo = bc * (1 - fbw/2.)
fhi = bc * (1 + fbw/2.)
thetas = map(np.deg2rad, [15./2,20./2,25./2,30./2])
for t in thetas:
wIP1, fIP1 = getWinIP(bc[0], fbw[0], t)[0]*100, getFilterIP(bc[0], fbw[0], t)[0]*100
wIP2, fIP2 = getWinIP(bc[1], fbw[1], t)[0]*100, getFilterIP(bc[1], fbw[1], t)[0]*100
print "%.1f & %.3f & %.3f & %.3f & %.3f & %.3f & %.3f\\\\"%(np.rad2deg(t), wIP1, wIP2, fIP1, fIP2, wIP1 + 2 * fIP2, wIP2 + 2 * fIP2)
# nARwin, dARwin = ARCoat(1.5, 2.5)
# n = 1.5 + .0001j
# n_window = [1.0] + nARwin + [n] + nARwin[::-1] + [1.0]
# d_window = [Inf] + dARwin + [5.0] + dARwin[::-1] + [Inf]
##
## n = 3.1 + .00008j
## nAR, dAR = ARCoat(n, 2.5)
## n_AluminaF = [1.0] + nAR + [n] + nAR[::-1] + [1.0]
## d_AluminaF = [Inf] + dAR + [2.0] + dAR[::-1] + [Inf]
## freqs = np.linspace(flo[0], fhi[1], 100)
## refs = []
## for f in freqs:
## lam = c / f * 1000
## refs += [tmm.coh_tmm('s',n_AluminaF, d_AluminaF, theta,lam)['R']]
##
## plt.plot(freqs, refs)
## plt.show()
##
##
# print getFilterIP(band_center[0], fbw[0], np.deg2rad(15.))
#
# i = 1
# theta = np.deg2rad(15.)
# freqs = np.linspace(flo[i], fhi[i], 100)
#
# s_array = []
# p_array = []
#
# for f in freqs:
# lam = c / f * 1000
#
# s_array += [tmm.coh_tmm('s',n_AluminaF, d_AluminaF, theta,lam)]
# p_array += [tmm.coh_tmm('p',n_AluminaF, d_AluminaF, theta,lam)]
#
# ts = np.array(map(lambda x : x['T'], s_array))
# tp = np.array(map(lambda x : x['T'], p_array))
# rs = np.array(map(lambda x : x['R'], s_array))
# rp = np.array(map(lambda x : x['R'], p_array))
# As = 1 - ts - rs
# Ap = 1 - tp - rp
# tsave = trapz(ts, freqs) / (fhi[i]- flo[i] )
# tpave = trapz(tp, freqs) / (fhi[i]- flo[i] )
# print trapz((ts - tp)/2, freqs) / (fhi[i]- flo[i] )
# rsave = trapz(rs, freqs) / (fhi[i]- flo[i] )
# rpave = trapz(rp, freqs) / (fhi[i]- flo[i] )
# Asave = trapz(As, freqs) / (fhi[i]- flo[i] )
# Apave = trapz(Ap, freqs) / (fhi[i]- flo[i] )
#
# print tsave, rsave, Asave
# print tpave, rpave, Apave
# print .5 * (tsave - tpave), .5 * (rsave - rpave), .5 * (Asave - Apave)
#
#
#
# ips93 = []
# ips145 = []
# ips93Old = []
# ips145Old = []
# freqs = np.linspace(90. * GHz, 160 * GHz, 50)
#
#
#
#
# for f0 in freqs:
# lam0 = c / f0 * 1000.
# nARwin, dARwin = ARCoat(1.5, lam0)
# n_window = [1.0] + nARwin + [1.5] + nARwin[::-1] + [1.0]
# d_window = [Inf] + dARwin + [5.0] + dARwin[::-1] + [Inf]
# theta = np.deg2rad(30.0/2)
# ips93 += [getBandAverage(n_window, d_window, band_center[0], fbw[0], theta)]
# ips145 += [getBandAverage(n_window, d_window, band_center[1], fbw[1], theta)]
#
# nARwin, dARwin = ARCoatOld(1.5, lam0)
# n_window = [1.0] + nARwin + [1.5] + nARwin[::-1] + [1.0]
# d_window = [Inf] + dARwin + [5.0] + dARwin[::-1] + [Inf]
# theta = np.deg2rad(30.0/2)
# ips93Old += [getBandAverage(n_window, d_window, band_center[0], fbw[0], theta)]
# ips145Old += [getBandAverage(n_window, d_window, band_center[1], fbw[1], theta)]
#
#
#
| gpl-2.0 | 4,520,980,213,326,978,600 | 23.442368 | 140 | 0.520902 | false |
M4rtinK/modrana | modules/gui_modules/gui_qt5/gui_qt5.py | 1 | 42517 | # -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# A modRana Qt 5 QtQuick 2.0 GUI module
# * it inherits everything in the base GUI module
# * overrides default functions and handling
#----------------------------------------------------------------------------
# Copyright 2013, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
import os
import re
import pyotherside
try:
from StringIO import StringIO # python 2
except ImportError:
from io import StringIO # python 3
# modRana imports
import math
from modules.gui_modules.base_gui_module import GUIModule
import time
import threading
from core import signal
from core import constants
from core.threads import threadMgr
from core import modrana_log
from core import utils
from core import paths
from core import point
import logging
no_prefix_log = logging.getLogger()
log = logging.getLogger("mod.gui.qt5")
qml_log = logging.getLogger("mod.gui.qt5.qml")
SEARCH_STATUS_PREFIX = "search:status:"
SEARCH_RESULT_PREFIX = "search:result:"
def newlines2brs(text):
""" QML uses <br> instead of \n for linebreak """
return re.sub('\n', '<br>', text)
def getModule(*args, **kwargs):
return QMLGUI(*args, **kwargs)
def point2dict(point):
""" Convert a Point instance to a dict
:param Point point: a Point object instance
:returns dict: a dict representation of the point
"""
return {
"name" : point.name,
"description" : point.description,
"latitude" : point.lat,
"longitude" : point.lon,
"elevation" : point.elevation,
"highlight" : False,
"mDistance" : 0, # will be filled in on QML side
"db_id" : getattr(point, "db_index", None),
"category_id" : getattr(point, "db_category_index", None)
}
class QMLGUI(GUIModule):
"""A Qt 5 + QtQuick 2 GUI module"""
def __init__(self, *args, **kwargs):
GUIModule.__init__(self, *args, **kwargs)
# some constants
self.msLongPress = 400
self.centeringDisableThreshold = 2048
self.firstTimeSignal = signal.Signal()
size = (800, 480) # initial window size
self._screen_size = None
# positioning related
self._pythonPositioning = False
# we handle notifications by forwarding them to the QML context
self.modrana.notificationTriggered.connect(self._dispatchNotificationCB)
# register exit handler
#pyotherside.atexit(self._shutdown)
# FIXME: for some reason the exit handler is never
# called on Sailfish OS, so we use a onDestruction
# handler on the QML side to trigger shutdown
# window state
self._fullscreen = False
# get screen resolution
# TODO: implement this
#screenWH = self.getScreenWH()
#self.log.debug(" @ screen size: %dx%d" % screenWH)
#if self.highDPI:
# self.log.debug(" @ high DPI")
#else:
# self.log.debug(" @ normal DPI")
# NOTE: what about multi-display devices ? :)
## add image providers
self._imageProviders = {
"icon" : IconImageProvider(self),
"tile" : TileImageProvider(self),
}
# log what version of PyOtherSide we are using
# - we log this without prefix as this shows up early
# during startup, so it looks nicer that way :-)
no_prefix_log.info("using PyOtherSide %s", pyotherside.version)
## register the actual callback, that
## will call the appropriate provider base on
## image id prefix
pyotherside.set_image_provider(self._selectImageProviderCB)
# initialize theming
self._theme = Theme(self)
## make constants accessible
#self.constants = self.getConstants()
#rc.setContextProperty("C", self.constants)
## connect to the close event
#self.window.closeEvent = self._qtWindowClosed
##self.window.show()
self._notificationQueue = []
# provides easy access to modRana modules from QML
self.modules = Modules(self)
# search functionality for the QML context
self.search = Search(self)
# POI handling for the QML context
self.POI = POI(self)
# make the log manager easily accessible
self.log_manager = modrana_log.log_manager
# log for log messages from the QML context
self.qml_log = qml_log
# queue a notification to QML context that
# a Python loggers is available
pyotherside.send("loggerAvailable")
# tracklogs
self.tracklogs = Tracklogs(self)
#routing
self.routing = Routing(self)
# turn by turn navigation
self.navigation = Navigation(self)
def firstTime(self):
# trigger the first time signal
self.firstTimeSignal()
self.modules.location.positionUpdate.connect(self._pythonPositionUpdateCB)
def _shutdown(self):
"""Called by PyOtherSide once the QML side is shutdown.
"""
self.log.info("Qt 5 GUI module shutting down")
self.modrana.shutdown()
def getIDString(self):
return "Qt5"
def needsLocalhostTileserver(self):
"""
the QML GUI needs the localhost tileserver
for efficient and responsive tile loading
"""
return False
def isFullscreen(self):
return self._fullscreen
def toggleFullscreen(self):
# TODO: implement this
pass
def setFullscreen(self, value):
pass
def setCDDragThreshold(self, threshold):
"""set the threshold which needs to be reached to disable centering while dragging
basically, larger threshold = longer drag is needed to disable centering
default value = 2048
"""
self.centeringDisableThreshold = threshold
def hasNotificationSupport(self):
return True
def _dispatchNotificationCB(self, text, msTimeout=5000, icon=""):
"""Let the QML context know that it should show a notification
:param str text: text of the notification message
:param int msTimeout: how long to show the notification in ms
"""
self.log.debug("notify:\n message: %s, timeout: %d" % (text, msTimeout))
pyotherside.send("pythonNotify", {
"message" : newlines2brs(text), # QML uses <br> in place of \n
"timeout" : msTimeout
})
def openUrl(self, url):
# TODO: implement this
pass
def _getTileserverPort(self):
m = self.m.get("tileserver", None)
if m:
return m.getServerPort()
else:
return None
def getScreenWH(self):
return self._screen_size
def getModRanaVersion(self):
"""
report current modRana version or None if version info is not available
"""
version = self.modrana.paths.version_string
if version is None:
return "unknown"
else:
return version
def setPosition(self, posDict):
if self._pythonPositioning:
# ignore the setPosition call if Python-side positioning
# is used as the Python side already has fresh position data
return
lat, lon = float(posDict["latitude"]), float(posDict["longitude"])
elevation = float(posDict["elevation"])
metersPerSecSpeed = float(posDict["speedMPS"]) # m/s
# report that we have 3D fix
# (looks like we can't currently reliably discern between 2D
# and 3D fix on the Jolla, might be good to check what other
# Sailfish OS running devices report)
self.set("fix", 3)
self.set("pos", (lat, lon))
# check if elevation is valid
if not math.isnan(elevation):
self.set("elevation", elevation)
else:
self.set("elevation", None)
# check if speed is valid
if not math.isnan(metersPerSecSpeed):
self.set("speed", metersPerSecSpeed*3.6)
self.set("metersPerSecSpeed", metersPerSecSpeed)
else:
self.set("speed", None)
self.set("metersPerSecSpeed", None)
# update done
self.set('locationUpdated', time.time())
# TODO: move part of this to the location module ?
def _pythonPositionUpdateCB(self, fix):
self._pythonPositioning = True
if fix.position:
(lat, lon) = fix.position
else:
(lat, lon) = None, None
# magnetic variation might sometimes not be set
magnetic_variation = 0.0
magnetic_variation_valid = False
if fix.magnetic_variation is not None:
magnetic_variation = fix.magnetic_variation
magnetic_variation_valid = True
pyotherside.send("pythonPositionUpdate", {
"latitude" : lat,
"longitude" : lon,
"altitude" : fix.altitude,
"speed" : fix.speed,
"verticalSpeed" : fix.climb,
"horizontalAccuracy" : fix.horizontal_accuracy,
"verticalAccuracy" : fix.vertical_accuracy,
"direction" : fix.bearing,
"magneticVariation" : magnetic_variation,
"magneticVariationValid" : magnetic_variation_valid,
"timestamp" : fix.timestamp,
"valid" : bool(fix.position)
})
def _selectImageProviderCB(self, imageId, requestedSize):
originalImageId = imageId
providerId = ""
#self.log.debug("SELECT IMAGE PROVIDER")
#self.log.debug(imageId)
#self.log.debug(imageId.split("/", 1))
try:
# split out the provider id
providerId, imageId = imageId.split("/", 1)
# get the provider and call its getImage()
return self._imageProviders[providerId].getImage(imageId, requestedSize)
except ValueError: # provider id missing or image ID overall wrong
self.log.error("provider ID missing: %s", originalImageId)
except AttributeError: # missing provider (we are calling methods of None ;) )
if providerId:
self.log.error("image provider for this ID is missing: %s", providerId)
else:
self.log.error("image provider broken, image id: %s", originalImageId)
except Exception: # catch and report the rest
self.log.exception("image loading failed, imageId: %s", originalImageId)
def _tileId2lzxy(self, tileId):
"""Convert tile id string to the "standard" lzxy tuple
:param str tileId: map instance name/layer id/z/x/y
:returns: lzxy tuple
:rtype: tuple
"""
split = tileId.split("/")
# pinchMapId = split[0]
layerId = split[1]
z = int(split[2])
x = int(split[3])
y = int(split[4])
# TODO: local id:layer cache ?
layer = self.modules.mapLayers.getLayerById(layerId)
return layer, z, x, y
def areTilesAvailable(self, tile_ids):
"""Report if tiles are available & request download for those that are not.
:param list tile_ids: list of tile ids to check
:return: a distionary of tile states, True = available, False = will be downloaded
:rtype: dict
"""
available_tiles = {}
for tile_id in tile_ids:
available_tiles[tile_id] = self.isTileAvailable(tile_id)
return available_tiles
def isTileAvailable(self, tileId):
"""Check if tile is available and add download request if not.
NOTE: If automatic tile downloads are disabled tile download
request will not be queued.
        :param str tileId: tile identifier
:return: True if the tile is locally available, False if not
:rtype: bool
"""
lzxy = self._tileId2lzxy(tileId)
if self.modules.mapTiles.tileInStorage(lzxy):
return True
else:
self._addTileDownloadRequest(lzxy, tileId)
return False
def _addTileDownloadRequest(self, lzxy, tileId):
"""Add an asynchronous download request, the tile will be
notified once the download is finished or fails
:param string tileId: unique tile id
"""
try:
self.modules.mapTiles.addTileDownloadRequest(lzxy, tileId)
except Exception:
self.log.exception("adding tile download request failed")
def _getStartupValues(self):
""" Return a dict of values needed by the Qt 5 GUI right after startup.
By grouping the requested values in a single dict we reduce the number
of Python <-> QML roundtrips and also make it possible to more easily
get these values asynchronously (values arrive all at the same time,
not in random order at random time).
:returns: a dict gathering the requested values
:rtype dict:
"""
values = {
"modRanaVersion" : self.getModRanaVersion(),
"constants" : self.getConstants(),
"show_quit_button": self.showQuitButton(),
"fullscreen_only": self.modrana.dmod.fullscreen_only,
"should_start_in_fullscreen": self.shouldStartInFullscreen(),
"needs_back_button": self.modrana.dmod.needs_back_button,
"needs_page_background": self.modrana.dmod.needs_page_background,
"lastKnownPos" : self.get("pos", None),
"gpsEnabled" : self.get("GPSEnabled", True),
"posFromFile" : self.get("posFromFile", None),
"nmeaFilePath" : self.get("NMEAFilePath", None),
"layerTree" : self.modules.mapLayers.getLayerTree(),
"dictOfLayerDicts" : self.modules.mapLayers.getDictOfLayerDicts(),
"themesFolderPath" : os.path.abspath(self.modrana.paths.themes_folder_path),
"sailfish" : self.dmod.device_id == "jolla",
"device_type" : self.modrana.dmod.device_type,
"highDPI" : self.highDPI,
"defaultTileStorageType" : self.modrana.dmod.defaultTileStorageType,
"aboutModrana" : self._get_about_info()
}
return values
def _set_screen_size(self, screen_size):
"""A method called by QML to report current screen size in pixels.
:param screen_size: screen width and height in pixels
:type screen_size: a tuple of integers
"""
self._screen_size = screen_size
def _get_about_info(self):
info = self.modules.info
return {
"email_address" : info.email_address,
"website_url" : info.website_url,
"source_repository_url" : info.source_repository_url,
"discussion_url" : info.main_discussion[0],
"translation_url" : info.translation_url,
"pay_pal_url" : info.pay_pal_url,
"flattr_url" : info.flattr_url,
"gratipay_url" : info.gratipay_url,
"bitcoin_address" : info.bitcoin_address
}
class Modules(object):
"""A class that provides access to modRana modules from the QML context,
using the __getattr__ method so that QML can access all modules dynamically
with normal dot notation
"""
def __init__(self, gui):
self._info = None
self._stats = None
self._mapLayers = None
self._storeTiles = None
self.gui = gui
def __getattr__(self, moduleName):
return self.gui.m.get(moduleName, None)
class POI(object):
"""An easy to use POI interface for the QML context"""
def __init__(self, gui):
self.gui = gui
def list_used_categories(self):
db = self.gui.modules.storePOI.db
cat_list = []
for category in db.list_used_categories():
category_id = category[2]
poi_count = len(db.get_all_poi_from_category(category_id)) # do this more efficiently
cat_list.append({
"name" : category[0],
"description" : category[1],
"poi_count" : poi_count,
"category_id" : category_id
})
return cat_list
def _db_changed(self):
"""Notify QML that the POI database has been changed.
This can be used to reload various caches and views.
"""
pyotherside.send("poiDatabaseChanged")
def _new_poi_added(self, new_poi_dict):
"""Notify QML that a new POI has been added"""
pyotherside.send("newPoiAddedToDatabase", new_poi_dict)
def store_poi(self, point_dict):
success = False
db = self.gui.modules.storePOI.db
name = point_dict.get("name")
description = point_dict.get("description", "")
lat = point_dict.get("lat")
lon = point_dict.get("lon")
category_id = point_dict.get("category_id")
# make sure lat & lon is a floating point number
try:
lat = float(lat)
lon = float(lon)
except Exception:
self.gui.log.exception("can't save POI: lat or lon not float")
# default to "Other" if no category is provided
if category_id is None:
category_id = 11 # TODO: this should ge dynamically queried from the database
# sanity check
if name and lon is not None and lat is not None:
poi = point.POI(name=name,
description=description,
lat=lat,
lon=lon,
db_cat_id=category_id)
self.gui.log.info("saving POI: %s", poi)
poi_db_index = db.store_poi(poi)
self.gui.log.info("POI saved")
success = True
# notify QML a new POI was added
new_poi_dict = point2dict(point.POI(name,
description,
lat,
lon,
category_id, poi_db_index))
self._new_poi_added(new_poi_dict)
else:
self.gui.log.error("cant's save poi, missing name or coordinates: %s", point_dict)
if success:
self._db_changed()
return success
def get_all_poi_from_category(self, category_id):
db = self.gui.modules.storePOI.db
poi_list = []
for poi_tuple in db.get_all_poi_from_category(category_id):
# TODO: to this already in poi_db
(name, desc, lat, lon, poi_id) = poi_tuple
poi_dict = point2dict(point.POI(name, desc, lat, lon, category_id, poi_id))
poi_list.append(poi_dict)
return poi_list
def delete_poi(self, poi_db_index):
log.debug("deleting POI with db index %s", poi_db_index)
db = self.gui.modules.storePOI.db
db.delete_poi(poi_db_index)
self._db_changed()
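# Usage sketch: the QML side hands plain dicts to POI.store_poi(); the keys read
# above are name, description, lat, lon and an optional category_id (when the
# category_id is omitted the POI falls back to the "Other" category). A
# hypothetical payload could look like this:
_EXAMPLE_POI_DICT = {
    "name": "Cafe",
    "description": "nice espresso",
    "lat": 50.087,
    "lon": 14.421,
    "category_id": 11,
}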
class Search(object):
"""An easy to use search interface for the QML context"""
def __init__(self, gui):
self.gui = gui
self._threadsInProgress = {}
# register the thread status changed callback
threadMgr.threadStatusChanged.connect(self._threadStatusCB)
def search(self, searchId, query, searchPoint=None):
"""Trigger an asynchronous search (specified by search id)
for the given term
:param str query: search query
"""
online = self.gui.m.get("onlineServices", None)
if online:
# construct result handling callback
callback = lambda x : self._searchCB(searchId, x)
# get search function corresponding to the search id
searchFunction = self._getSearchFunction(searchId)
# start the search and remember the search thread id
# so we can use it to track search progress
# (there might be more searches in progress so we
# need to know the unique search thread id)
if searchId == "local" and searchPoint:
pointInstance = point.Point(searchPoint.latitude, searchPoint.longitude)
threadId = searchFunction(query, callback, around=pointInstance)
else:
threadId = searchFunction(query, callback)
self._threadsInProgress[threadId] = searchId
return threadId
def _searchCB(self, searchId, results):
"""Handle address search results
:param list results: address search results
"""
resultList = []
for result in results:
resultList.append(point2dict(result))
resultId = SEARCH_RESULT_PREFIX + searchId
pyotherside.send(resultId, resultList)
thisThread = threading.currentThread()
# remove the finished thread from tracking
if thisThread.name in self._threadsInProgress:
del self._threadsInProgress[thisThread.name]
def cancelSearch(self, threadId):
"""Cancel the given asynchronous search thread"""
log.info("canceling search thread: %s", threadId)
threadMgr.cancel_thread(threadId)
if threadId in self._threadsInProgress:
del self._threadsInProgress[threadId]
def _threadStatusCB(self, threadName, threadStatus):
# check if the event corresponds to some of the
# in-progress search threads
recipient = self._threadsInProgress.get(threadName)
if recipient:
statusId = SEARCH_STATUS_PREFIX + recipient
pyotherside.send(statusId, threadStatus)
def _getSearchFunction(self, searchId):
"""Return the search function object for the given searchId"""
online = self.gui.m.get("onlineServices", None)
if online:
if searchId == "address":
return online.geocodeAsync
elif searchId == "wikipedia":
return online.wikipediaSearchAsync
elif searchId == "local":
return online.localSearchAsync
else:
log.error("search function for id: %s not found", searchId)
return None
else:
log.error("onlineServices module not found")
class ImageProvider(object):
"""PyOtherSide image provider base class"""
def __init__(self, gui):
self.gui = gui
def getImage(self, imageId, requestedSize):
pass
class IconImageProvider(ImageProvider):
"""the IconImageProvider class provides icon images to the QML layer as
QML does not seem to handle .. in the url very well"""
def __init__(self, gui):
ImageProvider.__init__(self, gui)
def getImage(self, imageId, requestedSize):
#log.debug("ICON!")
#log.debug(imageId)
try:
#TODO: theme name caching ?
themeFolder = self.gui.modrana.paths.themes_folder_path
# fullIconPath = os.path.join(themeFolder, imageId)
# the path is constructed like this in QML
# so we can safely just split it like this
splitPath = imageId.split("/")
# remove any Ambiance specific garbage appended by Silica
splitPath[-1] = splitPath[-1].rsplit("?")[0]
fullIconPath = os.path.join(themeFolder, *splitPath)
extension = os.path.splitext(fullIconPath)[1]
# set correct data format based on the extension
if extension.lower() == ".svg":
data_format = pyotherside.format_svg_data
else:
data_format = pyotherside.format_data
if not utils.internal_isfile(fullIconPath):
if splitPath[0] == constants.DEFAULT_THEME_ID:
# already on default theme and icon path does not exist
log.error("Icon not found in default theme:")
log.error(fullIconPath)
return None
else: # try to get the icon from default theme
splitPath[0] = constants.DEFAULT_THEME_ID
fullIconPath = os.path.join(themeFolder, *splitPath)
if not utils.internal_isfile(fullIconPath):
# icon not found even in the default theme
log.error("Icon not found even in default theme:")
log.error(fullIconPath)
return None
            # We only set height or else SVG icons would be squished if a square icon
            # has been requested but the SVG icon is not square. If just height is set,
            # the clever SVG handling code (which I wrote ;-) ) will handle this correctly. :)
return utils.internal_get_file_contents(fullIconPath), (-1, requestedSize[1]), data_format
except Exception:
log.exception("icon image provider: loading icon failed, id:\n%s" % imageId)
class TileImageProvider(ImageProvider):
"""
    the TileImageProvider class provides map tile images to the QML map element
"""
def __init__(self, gui):
ImageProvider.__init__(self, gui)
self.gui = gui
self.gui.firstTimeSignal.connect(self._firstTimeCB)
self._tileNotFoundImage = bytearray([0, 255, 255, 255])
def _firstTimeCB(self):
# connect to the tile downloaded callback so that we can notify
        # the QML context that a tile has been downloaded and should be
# shown on the screen
# NOTE: we need to wait for the firstTime signal as at GUI module init
# the other modules (other than the device module) are not yet initialized
self.gui.modules.mapTiles.tileDownloaded.connect(self._tileDownloadedCB)
def _tileDownloadedCB(self, error, lzxy, tag):
"""Notify the QML context that a tile has been downloaded"""
pinchMapId = tag.split("/")[0]
#log.debug("SENDING: %s %s" % ("tileDownloaded:%s" % pinchMapId, tag))
resoundingSuccess = error == constants.TILE_DOWNLOAD_SUCCESS
fatalError = error == constants.TILE_DOWNLOAD_ERROR
pyotherside.send("tileDownloaded:%s" % pinchMapId, tag, resoundingSuccess, fatalError)
def getImage(self, imageId, requestedSize):
"""
the tile info should look like this:
layerID/zl/x/y
"""
#log.debug("TILE REQUESTED %s" % imageId)
#log.debug(requestedSize)
try:
# split the string provided by QML
split = imageId.split("/")
pinchMapId = split[0]
layerId = split[1]
z = int(split[2])
x = int(split[3])
y = int(split[4])
# TODO: local id:layer cache ?
layer = self.gui.modules.mapLayers.getLayerById(layerId)
# construct the tag
#tag = (pinchMapId, layerId, z, x, y)
#tag = (pinchMapId, layerId, z, x, y)
# get the tile from the tile module
tileData = self.gui.modules.mapTiles.getTile((layer, z, x, y),
asynchronous=True, tag=imageId,
download=False)
imageSize = (256,256)
if tileData is None:
# The tile was not found locally
# * in persistent storage (files/sqlite db)
# * in the tile cache in memory
# An asynchronous tile download request has been added
# automatically, so we just now need to notify the
            # QtQuick GUI that it should wait for the download
# completed signal.
#
# We notify the GUI by returning a 1x1 image.
return self._tileNotFoundImage, (1,1), pyotherside.format_argb32
#log.debug("%s NOT FOUND" % imageId)
#log.debug("RETURNING STUFF %d %s" % (imageSize[0], imageId))
return bytearray(tileData), imageSize, pyotherside.format_data
except Exception:
log.error("tile image provider: loading tile failed")
log.error(imageId)
log.error(requestedSize)
log.exception("tile image provider exception")
class MapTiles(object):
def __init__(self, gui):
self.gui = gui
@property
def tileserverPort(self):
port = self.gui._getTileserverPort()
if port:
return port
else: # None,0 == 0 in QML
return 0
def loadTile(self, layerId, z, x, y):
"""
        Load a given tile from storage and/or from the network.
        :returns: True - tile already in storage or in memory
                  False - tile download in progress, retry in a while
"""
# log.debug(layerId, z, x, y)
if self.gui.mapTiles.tileInMemory(layerId, z, x, y):
# log.debug("available in memory")
return True
elif self.gui.mapTiles.tileInStorage(layerId, z, x, y):
# log.debug("available in storage")
return True
else: # not in memory or storage
# add a tile download request
self.gui.mapTiles.addTileDownloadRequest(layerId, z, x, y)
# log.debug("downloading, try later")
return False
class _Search(object):
_addressSignal = signal.Signal()
changed = signal.Signal()
test = signal.Signal()
def __init__(self, gui):
self.gui = gui
self._addressSearchResults = None
self._addressSearchStatus = "Searching..."
self._addressSearchInProgress = False
self._addressSearchThreadName = None
self._localSearchResults = None
self._wikipediaSearchResults = None
self._routeSearchResults = None
self._POIDBSearchResults = None
        # why are we keeping our own dictionary of wrapped
        # objects and not just returning a newly wrapped object on demand?
# -> because PySide (1.1.1) segfaults if we don't hold any reference
# on the object returned :)
# register the thread status changed callback
threadMgr.threadStatusChanged.connect(self._threadStatusCB)
def _threadStatusCB(self, threadName, threadStatus):
if threadName == self._addressSearchThreadName:
self._addressSearchStatus = threadStatus
self._addressSignal()
def address(self, address):
"""Trigger an asynchronous address search for the given term
:param address: address search query
:type address: str
"""
online = self.gui.m.get("onlineServices", None)
if online:
self._addressSearchThreadName = online.geocodeAsync(
address, self._addressSearchCB
)
self._addressSearchInProgress = True
self._addressSignal()
def addressCancel(self):
"""Cancel the asynchronous address search"""
threadMgr.cancel_thread(self._addressSearchThreadName)
self._addressSearchInProgress = False
self._addressSearchStatus = "Searching..."
self._addressSignal()
def _addressSearchCB(self, results):
"""Replace old address search results (if any) with
new (wrapped) results
:param results: address search results
:type results: list
"""
#self.gui._addressSearchListModel.set_objects(
# wrapList(results, wrappers.PointWrapper)
#)
self._addressSearchInProgress = False
self._addressSignal.emit()
class ModRana(object):
"""
core modRana functionality
"""
def __init__(self, modrana, gui):
self.modrana = modrana
self.gui = gui
self.modrana.watch("mode", self._modeChangedCB)
self.modrana.watch("theme", self._themeChangedCB)
self._theme = Theme(gui)
# mode
def _getMode(self):
return self.modrana.get('mode', "car")
def _setMode(self, mode):
self.modrana.set('mode', mode)
modeChanged = signal.Signal()
def _modeChangedCB(self, *args):
"""notify when the mode key changes in options"""
self.modeChanged()
class Theme(object):
"""modRana theme handling"""
def __init__(self, gui):
self.gui = gui
# connect to the first time signal
self.gui.firstTimeSignal.connect(self._firstTimeCB)
self.themeModule = None
self._themeDict = {}
self.colors = None
self.modrana = self.gui.modrana
self.themeChanged.connect(self._notifyQMLCB)
themeChanged = signal.Signal()
def _firstTimeCB(self):
# we need the theme module
self.themeModule = self.gui.m.get('theme')
theme = self.themeModule.theme
# reload the theme dict so that
# the dict is up to date and
# then trigger the changed signal
# and give it the current theme dict
self.themeChanged(self._reloadTheme(theme))
# connect to the core theme-modules theme-changed signal
self.themeModule.themeChanged.connect(self._themeChangedCB)
def _themeChangedCB(self, newTheme):
""" Callback from the core theme module
- reload theme and trigger our own themeChanged signal
:param newTheme: new theme from the core theme module
:type newTheme: Theme
"""
self.themeChanged(self._reloadTheme(newTheme))
def _notifyQMLCB(self, newTheme):
""" Notify the QML context that the modRana theme changed
:param newTheme: the new theme
:type newTheme: dict
"""
pyotherside.send("themeChanged", newTheme)
@property
def themeId(self):
return self._themeDict.get("id")
@themeId.setter
def themeId(self, themeId):
self.modrana.set('theme', themeId)
@property
def theme(self):
return self._themeDict
def _reloadTheme(self, theme):
"""Recreate the theme dict from the new theme object
:param theme: new modRana Theme object instance
:type theme: Theme
"""
themeDict = {
"id" : theme.id,
"name" : theme.name,
"color" : {
"main_fill" : theme.getColor("main_fill", "#92aaf3"),
"main_highlight_fill" : theme.getColor("main_highlight_fill", "#f5f5f5"),
"icon_grid_toggled" : theme.getColor("icon_grid_toggled", "#c6d1f3"),
"icon_button_normal" : theme.getColor("icon_button_normal", "#c6d1f3"),
"icon_button_toggled" : theme.getColor("icon_button_toggled", "#3c60fa"),
"icon_button_text" : theme.getColor("icon_button_text", "black"),
"page_background" : theme.getColor("page_background", "black"),
"list_view_background" : theme.getColor("list_view_background", "#d2d2d2d"),
"page_header_text" : theme.getColor("page_header_text", "black"),
}
}
self._themeDict = themeDict
return themeDict
class Tracklogs(object):
"""Some tracklog specific functionality"""
SAILFISH_TRACKLOGS_SYMLINK_NAME = "modrana_tracklogs"
SAILFISH_SYMLINK_PATH = os.path.join(paths.get_home_path(), "Documents", SAILFISH_TRACKLOGS_SYMLINK_NAME)
def __init__(self, gui):
self.gui = gui
self.gui.firstTimeSignal.connect(self._firstTimeCB)
self._sendUpdates = True
def _firstTimeCB(self):
# connect to the tracklog update signal, so that we can send
# track logging state updates to the GUI
self.gui.modules.tracklog.tracklogUpdated.connect(self._sendUpdateCB)
def _sendUpdateCB(self):
"""Tracklog has been updated, send the updated info dict to GUI"""
if self._sendUpdates:
pyotherside.send("tracklogUpdated", self.gui.modules.tracklog.getStatusDict())
def setSendUpdates(self, value):
"""Set if tracklog updates should be sent to the GUI layer or not.
This is used to disable updates when the track recording page is not visible.
"""
self._sendUpdates = value
if value:
self.gui.log.debug("tracklog: enabling logging status updates")
else:
self.gui.log.debug("tracklog: disabling logging status updates")
def sailfishSymlinkExists(self):
"""Report if the easy access symlink on Sailfish OS for tracklogs exists
:returns: True if the symlink exists, False if not
:rtype: bool
"""
return os.path.islink(self.SAILFISH_SYMLINK_PATH)
def createSailfishSymlink(self):
"""Create symlink from the actual tracklogs folder in the XDG path
to ~/Documents for easier access to the tracklogs by the users
"""
self.gui.log.info("tracklogs: creating sailfish tracklogs symlink")
if self.sailfishSymlinkExists():
self.gui.log.warning("tracklogs: the Sailfish tracklogs symlink already exists")
else:
try:
os.symlink(self.gui.modrana.paths.tracklog_folder_path, self.SAILFISH_SYMLINK_PATH)
self.gui.log.info("tracklogs: sailfish tracklogs symlink created")
except Exception:
self.gui.log.exception("tracklogs: sailfish tracklogs symlink creation failed")
def removeSailfishSymlink(self):
"""Remove the easy-access Sailfish OS symlink"""
self.gui.log.info("tracklogs: removing sailfish tracklogs symlink")
if not self.sailfishSymlinkExists():
self.gui.log.warning("tracklogs: the Sailfish tracklogs symlink does not exist")
else:
try:
os.remove(self.SAILFISH_SYMLINK_PATH)
self.gui.log.info("tracklogs: sailfish tracklogs symlink removed")
except Exception:
self.gui.log.exception("tracklogs: sailfish tracklogs symlink removed")
class Routing(object):
"""Qt 5 GUI specific stuff for routing support"""
def __init__(self, gui):
self.gui = gui
self.gui.firstTimeSignal.connect(self._first_time_cb)
self._sendUpdates = True
def request_route(self, route_request):
waypoints = []
self.gui.log.debug("REQUEST:")
self.gui.log.debug(route_request)
for waypoint_dict in route_request["waypoints"]:
waypoint = point.Waypoint(lat=waypoint_dict["latitude"],
lon=waypoint_dict["longitude"],
heading=waypoint_dict["heading"])
waypoints.append(waypoint)
self.gui.modules.route.waypoints_route(waypoints)
def _first_time_cb(self):
self.gui.modules.route.routing_done.connect(self._routing_done_cb)
def _routing_done_cb(self, result):
if result and result.returnCode == constants.ROUTING_SUCCESS:
route_points = result.route.points_lle
message_points = result.route.message_points
message_points_llemi = []
for mp in message_points:
message_points_llemi.append(mp.llemi)
# also add a point for the route end
if route_points:
lastPoint = route_points[-1]
lastPointMessage = "You <b>should</b> be near the destination."
message_points_llemi.append((lastPoint[0], lastPoint[1],
lastPoint[2], lastPointMessage))
# TODO: this should really be done in the route module itself somehow
self.gui.modules.route.process_and_save_directions(result.route)
self.gui.log.debug("routing successful")
pyotherside.send("routeReceived",
{"points" : route_points,
"messagePoints" : message_points_llemi}
)
        else:
            # result can be None here, so guard the returnCode access
            return_code = result.returnCode if result else None
            error_message = constants.ROUTING_FAILURE_MESSAGES.get(return_code, "Routing failed.")
            self.gui.log.debug(error_message)
class Navigation(object):
"""Qt 5 GUI specific stuff for turn by turn navigation support"""
def __init__(self, gui):
self.gui = gui
self.gui.firstTimeSignal.connect(self._firstTimeCB)
self.tbt = None
def _firstTimeCB(self):
# the module machinery is not yet really setup at init time,
# so we need to do stuff involving modRana modules only
# at the first time signal
self.tbt = self.gui.modules.turnByTurn
# connect to signals
self.tbt.navigation_started.connect(self._navigation_started_cb)
self.tbt.navigation_stopped.connect(self._navigation_stopped_cb)
self.tbt.destination_reached.connect(self._destination_reached_cb)
self.tbt.rerouting_triggered.connect(self._rerouting_triggered_cb)
self.tbt.current_step_changed.connect(self._current_step_changed_cb)
def _navigation_started_cb(self):
pyotherside.send("navigationStarted")
def _navigation_stopped_cb(self):
pyotherside.send("navigationStopped")
def _destination_reached_cb(self):
pyotherside.send("navigationDestionationReached")
def _rerouting_triggered_cb(self):
pyotherside.send("navigationReroutingTriggered")
def _current_step_changed_cb(self, step_point):
step_dict = {
"message" : step_point.description,
"latitude" : step_point.lat,
"longitude" : step_point.lon,
"icon" : step_point.icon,
}
pyotherside.send("navigationCurrentStepChanged", step_dict)
def start(self):
self.tbt.start_tbt()
def stop(self):
self.tbt.stop_tbt()
| gpl-3.0 | 6,526,115,789,858,086,000 | 36.592396 | 109 | 0.600936 | false |
jupyter/oauthenticator | oauthenticator/globus.py | 1 | 8166 | """
Custom Authenticator to use Globus OAuth2 with JupyterHub
"""
import os
import pickle
import base64
from tornado import web
from tornado.auth import OAuth2Mixin
from tornado.web import HTTPError
from traitlets import List, Unicode, Bool
from jupyterhub.handlers import LogoutHandler
from jupyterhub.auth import LocalAuthenticator
from jupyterhub.utils import url_path_join
from .oauth2 import OAuthLoginHandler, OAuthenticator
try:
import globus_sdk
except ImportError:
raise ImportError('globus_sdk is not installed, please see '
'"globus-requirements.txt" for using Globus oauth.')
class GlobusMixin(OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = 'https://auth.globus.org/v2/oauth2/authorize'
class GlobusLoginHandler(OAuthLoginHandler, GlobusMixin):
pass
class GlobusLogoutHandler(LogoutHandler):
"""
Handle custom logout URLs and token revocation. If a custom logout url
is specified, the 'logout' button will log the user out of that identity
provider in addition to clearing the session with Jupyterhub, otherwise
only the Jupyterhub session is cleared.
"""
async def get(self):
user = self.get_current_user()
if user:
if self.authenticator.revoke_tokens_on_logout:
                await self.clear_tokens(user)
self.clear_login_cookie()
if self.authenticator.logout_redirect_url:
self.redirect(self.authenticator.logout_redirect_url)
else:
super().get()
async def clear_tokens(self, user):
if not self.authenticator.revoke_tokens_on_logout:
return
state = await user.get_auth_state()
if state:
self.authenticator.revoke_service_tokens(state.get('tokens'))
self.log.info('Logout: Revoked tokens for user "{}" services: {}'
.format(user.name, ','.join(state['tokens'].keys())))
state['tokens'] = ''
            await user.save_auth_state(state)
class GlobusOAuthenticator(OAuthenticator):
"""The Globus OAuthenticator handles both authorization and passing
transfer tokens to the spawner. """
login_service = 'Globus'
login_handler = GlobusLoginHandler
logout_handler = GlobusLogoutHandler
identity_provider = Unicode(help="""Restrict which institution a user
can use to login (GlobusID, University of Hogwarts, etc.). This should
be set in the app at developers.globus.org, but this acts as an additional
check to prevent unnecessary account creation.""").tag(config=True)
def _identity_provider_default(self):
return os.getenv('IDENTITY_PROVIDER', 'globusid.org')
exclude_tokens = List(
help="""Exclude tokens from being passed into user environments
when they start notebooks, Terminals, etc."""
).tag(config=True)
def _exclude_tokens_default(self):
return ['auth.globus.org']
def _scope_default(self):
return [
'openid',
'profile',
'urn:globus:auth:scope:transfer.api.globus.org:all'
]
allow_refresh_tokens = Bool(
help="""Allow users to have Refresh Tokens. If Refresh Tokens are not
allowed, users must use regular Access Tokens which will expire after
a set time. Set to False for increased security, True for increased
convenience."""
).tag(config=True)
def _allow_refresh_tokens_default(self):
return True
globus_local_endpoint = Unicode(help="""If Jupyterhub is also a Globus
endpoint, its endpoint id can be specified here.""").tag(config=True)
def _globus_local_endpoint_default(self):
return os.getenv('GLOBUS_LOCAL_ENDPOINT', '')
logout_redirect_url = \
Unicode(help="""URL for logging out.""").tag(config=True)
def _logout_redirect_url_default(self):
return os.getenv('LOGOUT_REDIRECT_URL', '')
revoke_tokens_on_logout = Bool(
help="""Revoke tokens so they cannot be used again. Single-user servers
MUST be restarted after logout in order to get a fresh working set of
tokens."""
).tag(config=True)
def _revoke_tokens_on_logout_default(self):
return False
async def pre_spawn_start(self, user, spawner):
"""Add tokens to the spawner whenever the spawner starts a notebook.
This will allow users to create a transfer client:
globus-sdk-python.readthedocs.io/en/stable/tutorial/#tutorial-step4
"""
spawner.environment['GLOBUS_LOCAL_ENDPOINT'] = \
self.globus_local_endpoint
state = await user.get_auth_state()
if state:
globus_data = base64.b64encode(
pickle.dumps(state)
)
spawner.environment['GLOBUS_DATA'] = globus_data.decode('utf-8')
def globus_portal_client(self):
return globus_sdk.ConfidentialAppAuthClient(
self.client_id,
self.client_secret)
async def authenticate(self, handler, data=None):
"""
Authenticate with globus.org. Usernames (and therefore Jupyterhub
accounts) will correspond to a Globus User ID, so [email protected]
will have the 'foouser' account in Jupyterhub.
"""
code = handler.get_argument("code")
redirect_uri = self.get_callback_url(self)
client = self.globus_portal_client()
client.oauth2_start_flow(
redirect_uri,
requested_scopes=' '.join(self.scope),
refresh_tokens=self.allow_refresh_tokens
)
        # Exchange the authorization code for tokens and decode the id_token
tokens = client.oauth2_exchange_code_for_tokens(code)
id_token = tokens.decode_id_token(client)
# It's possible for identity provider domains to be namespaced
# https://docs.globus.org/api/auth/specification/#identity_provider_namespaces # noqa
username, domain = id_token.get('preferred_username').split('@', 1)
if self.identity_provider and domain != self.identity_provider:
raise HTTPError(
403,
'This site is restricted to {} accounts. Please link your {}'
' account at {}.'.format(
self.identity_provider,
self.identity_provider,
'globus.org/app/account'
)
)
return {
'name': username,
'auth_state': {
'client_id': self.client_id,
'tokens': {
tok: v for tok, v in tokens.by_resource_server.items()
if tok not in self.exclude_tokens
},
}
}
def revoke_service_tokens(self, services):
"""Revoke live Globus access and refresh tokens. Revoking inert or
non-existent tokens does nothing. Services are defined by dicts
returned by tokens.by_resource_server, for example:
services = { 'transfer.api.globus.org': {'access_token': 'token'}, ...
<Additional services>...
}
"""
client = self.globus_portal_client()
for service_data in services.values():
client.oauth2_revoke_token(service_data['access_token'])
client.oauth2_revoke_token(service_data['refresh_token'])
def get_callback_url(self, handler=None):
"""
Getting the configured callback url
"""
if self.oauth_callback_url is None:
raise HTTPError(500,
'No callback url provided. '
'Please configure by adding '
'c.GlobusOAuthenticator.oauth_callback_url '
'to the config'
)
return self.oauth_callback_url
def logout_url(self, base_url):
return url_path_join(base_url, 'logout')
def get_handlers(self, app):
return super().get_handlers(app) + [(r'/logout', self.logout_handler)]
class LocalGlobusOAuthenticator(LocalAuthenticator, GlobusOAuthenticator):
"""A version that mixes in local system user creation"""
pass
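# Example (sketch, not part of the upstream module): minimal jupyterhub_config.py
# settings for this authenticator; the id, secret and URLs are placeholders.
#
#   c.JupyterHub.authenticator_class = 'oauthenticator.globus.GlobusOAuthenticator'
#   c.GlobusOAuthenticator.client_id = '<globus-app-client-id>'
#   c.GlobusOAuthenticator.client_secret = '<globus-app-client-secret>'
#   c.GlobusOAuthenticator.oauth_callback_url = 'https://hub.example.org/hub/oauth_callback'
#   c.GlobusOAuthenticator.identity_provider = 'example.edu'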
| bsd-3-clause | 2,794,234,210,884,525,000 | 35.293333 | 93 | 0.624051 | false |
SIU-CS/J-JAM-production | mhapsite/mhap/forms.py | 1 | 3810 | """
Contains imports of forms from django and captcha and our custom models.
Has logic for form validation as well.
"""
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms.extras.widgets import SelectDateWidget
from captcha.fields import CaptchaField
from .models import Post, Profile
BIRTH_YEAR_CHOICES = tuple([str(date) for date in range(1980, 2000)])
#print BIRTH_YEAR_CHOICES
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = [
"title",
"content",
"secret"
]
#http://stackoverflow.com/questions/28458770/how-can-create-a-model-form-in-django-with-a-one-to-one-relation-with-another-mo
#http://stackoverflow.com/questions/11923317/creating-django-forms
class PasswordForm(forms.Form):
"""
Extends forms.Form and is essentially a password form we use for inputting password twice
"""
password1 = forms.CharField(label=("Password"), widget=forms.PasswordInput)
password2 = forms.CharField(label=("Password (again)"), widget=forms.PasswordInput)
def clean(self):
print "IN CLEAN"
print self.errors
password_two = self.cleaned_data.get('password2')
password_one = self.cleaned_data.get('password1')
#print password_one,"PASSWORD !"
#print password_one,password_two
if not password_two:
raise forms.ValidationError("Must confirm your password")
if password_one != password_two:
raise forms.ValidationError("Passwords dont match")
valid = self.user.check_password(self.cleaned_data['password1'])
print self.user
if not valid:
raise forms.ValidationError("Password Incorrect")
print self.errors
        return self.cleaned_data
def __init__(self, user=None, *args, **kwargs):
self.user = user
print self.user, "IN INIT"
super(PasswordForm, self).__init__(*args, **kwargs)
class ChatForm(forms.Form):
"""
Extends forms.Form and is essentially a form for inputting chat messages
"""
chat = forms.CharField(label=("Input"))
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ("birth_date",)
widgets = {
'birth_date':SelectDateWidget(years=BIRTH_YEAR_CHOICES)
}
#http://stackoverflow.com/questions/23692533/django-datefield-object-has-no-attribute-is-hidden
class UserForm(forms.ModelForm):
#password1 = forms.PasswordInput()
#password2=forms.PasswordInput()
#http://stackoverflow.com/questions/4939737/cant-add-field-to-modelform-at-init?rq=1
class Meta:
model = User
fields = ('username',)
class SignUpForm(UserCreationForm):
print "IN SIGNUP"
    email = forms.EmailField(max_length=254, help_text='Required. Enter a valid email address.')
birth_date = forms.DateField(help_text='Required. Format: YYYY-MM-DD')
class Meta:
print "IN META"
model = User
fields = ('username', 'email', 'birth_date','password1', 'password2', )
#http://stackoverflow.com/questions/1160030/how-to-make-email-field-unique-in-model-user-from-contrib-auth-in-django
def clean_email(self):
print "IN UNIQUE EMAIL"
email = self.cleaned_data.get('email')
print email
username = self.cleaned_data.get('username')
print username
print User.objects.filter(email=email).exclude(username=username)
if email and User.objects.filter(email=email).exclude(username=username).count():
raise forms.ValidationError(u'Email addresses must be unique.')
return email
class AxesCaptchaForm(forms.Form):
captcha = CaptchaField()
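# Example (sketch, not part of the original module): validating the password
# confirmation form in a view; "request" is the usual Django request object.
#
#   form = PasswordForm(user=request.user, data=request.POST)
#   if form.is_valid():
#       ...  # proceed with the sensitive action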
| gpl-3.0 | -1,176,780,099,395,129,900 | 31.02521 | 125 | 0.667979 | false |
curiouserrandy/Python-Utils | dataflow.py | 1 | 11123 | import operator
### Setting up a quick dataflow structure model for handling
### Originally inspired for handling transformations in importing mail
### messages for the mail database project (see Projects/MailSys) but
### useful enough that I wanted it global.
### XXX: There probably are improvements to make in the base abstraction
### around handling multiple intput and output links. Oh, well.
### XXX: Some logging in the base abstraction would be good. For the
### current implementation, central logging showing input and output
### along each link would be grand. I think this requires inputs to
### pass through the base class and call up, which is fine.
### Naming conventions for internally/externally called functions and
### should/shouldn't be overridden:
### If they're external I want them to be easy to call, so I don't want
### to distinguish override/non-override in the name. I'll follow that
### pattern internally as well. Internal functions will have an _ prepended.
### XXX: Should have classes export schema and have that schema checked on
### linkage.
### XXX: You may want tagged inputs and outputs. Heck, you may want
### both tagged and numbered outputs; numbered for multiple items of
### the same type, and tagged for different categories.
### XXX: Specify interface more completely (specifically to superclasses, and
### to external functions).
### XXX: Might want to think about operator overloading to link DFNs
### (possibly mimic building a list; stream DFN container? Any good
### syntactic sugar for splits?)
### XXX: I need a clearer distinction in states between "figuring out
### linkages" and "flowing". I need to know whether I can trust
### the linkage info.
### XXX: Why am I assuming a single input before inputs get attached?
class DataflowNode(object):
"""Base class for node in a dataflow network. Takes an input record,
does some type of transformation on it, and outputs some other record.
Default action is just to pass things through.
Note that only input, _localEos, and _validate_link are intended to be overridden by
descendants."""
def __init__(self):
self.outputFunctions = []
self.outputEos = []
# Default to a single input. If there are more from other DFNs,
# the array will expand automatically, and it currently doesn't
# make sense to have no inputs for a DFN.
self.upstreamInfo = [] # Tuples of obj, output#
self.eosSeen = [False,]
self.shutdown = False
# input and eos are both called by both user and internal links
def input(self, record, inputLink=0):
"Default behavior is assertion exception; descendants should override."
assert False, "DataflowNode class not meant to be used directly."
def eos(self, inputLink=0):
self.eosSeen[inputLink] = True
if reduce(operator.and_, filter(lambda x: operator.is_not(x, None),
self.eosSeen)):
self._localEos()
for f in self.outputEos:
if f: f()
def setupInput(self, inputLink):
"""Setup a specific external input for multi-external input
nodes."""
assert inputLink > 1
self.eosSeen += \
[None,] * max(0,inputLink - len(self.eosSeen) + 1)
self.eosSeen[inputLink] = False
def _firstOpenOutput(self):
"""Used by subclasses to do auto-linking of multiple outputs."""
for i in range(len(self.outputFunctions)):
            if self.outputFunctions[i] is None:
return i
return len(self.outputFunctions)
def _validate_link(self, linknum, input_p):
"""Should be overridden if only some links are valid."""
return True
def _localEos(self):
"""Internal function called when eos has been seen on all inputs.
Descendants may override to get this notification."""
pass
def _output(self, record, links=None):
"""Internal method for outputing a record conditional on output func.
links is a list of outputs to output on; defaults to the specical value
None, meaning all of them."""
if links is None: links = range(len(self.outputFunctions))
for l in links:
if self.outputFunctions[l]:
self.outputFunctions[l](record)
def _shutdown(self):
"""Inform upstream nodes that we're going away and they shouldn't
bother us anymore. Note that this is independent from sending
eos downstream."""
self.shutdown = True
for usn in self.upstreamInfo:
(node, port) = usn
node._breakPipe(port)
def _breakPipe(self, port):
self.outputFunctions[port] = None
self.outputEos[port] = None
if not filter(None, self.outputFunctions):
# We're done; we've got no more customers
self._shutdown()
@staticmethod
def link(outputNode, inputNode, outputLink=0, inputLink=0):
assert outputNode._validate_link(outputLink, False), (outputNode, outputLink)
assert inputNode._validate_link(inputLink, True), (inputNode, inputLink)
outputNode.outputFunctions += \
[None,] * max(0,outputLink - len(outputNode.outputFunctions) + 1)
outputNode.outputEos += \
[None,] * max(0,outputLink - len(outputNode.outputEos) + 1)
inputNode.eosSeen += \
[None,] * max(0,inputLink - len(inputNode.eosSeen) + 1)
inputNode.upstreamInfo += \
[None,] * max(0,inputLink - len(inputNode.upstreamInfo) + 1)
outputNode.outputFunctions[outputLink] = \
lambda record: inputNode.input(record, inputLink=inputLink)
outputNode.outputEos[outputLink] = \
lambda: inputNode.eos(inputLink=inputLink)
inputNode.eosSeen[inputLink] = False
inputNode.upstreamInfo[inputLink] = (outputNode, outputLink)
# Utility dataflow classes
class StreamDFN(DataflowNode):
"""Easy class for binding together a single list of data flow nodes."""
def __init__(self):
DataflowNode.__init__(self)
self.start = None
self.end = None
def prepend(self, node):
if self.start:
DataflowNode.link(node, self.start)
self.start = node
else:
self.start = self.end = node
def append(self, node):
if self.end:
DataflowNode.link(self.end, node)
self.end = node
else:
self.start = self.end = node
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, one output
def input(self, record, inputLink=0):
assert inputLink == 0
if self.start:
self.start.input(record)
else:
self._output(record)
def _localEos(self):
if self.start:
self.start.eos()
class SplitDFN(DataflowNode):
"""Split the input into as many outputs as are linked."""
def __init__(self):
DataflowNode.__init__(self)
def _validate_link(self, linknum, input_p):
return linknum == 0 or not input_p # One input, any num outputs
def input(self, record, inputLink=0):
self._output(record)
def addOutput(self, downstreamNode, downstreamlink=0):
DataflowNode.link(self, downstreamNode, self._firstOpenOutput(),
downstreamlink)
class FilterDFN(DataflowNode):
"""Filters input through a specified function."""
def __init__(self, filterFunc=None, eosFunc=None):
DataflowNode.__init__(self)
self.filterFunc = filterFunc
self.eosFunc = eosFunc
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, 0-1 outputs.
def input(self, record, inputLink=0):
if self.filterFunc: self._output(self.filterFunc(record))
def _localEos(self):
if self.eosFunc: self.eosFunc()
class SinkDFN(FilterDFN):
"""Accepts input and dumps it to a specified function."""
# Implemented through FilterDFN with no outputs.
def _validate_link(self, linknum, input_p):
        return input_p and linknum == 0 # Single input link, no outputs
class RecordIntervalDFN(DataflowNode):
"""Only transmit a specified interval of records from input to output."""
def __init__(self, interval):
"""Only transmit records whose record number falls in the given
interval from input to output. -1 for the end of the interval means
no limit."""
DataflowNode.__init__(self)
assert isinstance(interval[0], int) and isinstance(interval[1], int)
self.interval = interval
self.recordNum = 0
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, one output
def input(self, record, inputLink=0):
if (self.recordNum >= self.interval[0]
and (self.interval[1] == -1 or self.recordNum < self.interval[1])):
self._output(record)
self.recordNum += 1
if self.interval[1] != -1 and self.recordNum >= self.interval[1]:
self.eos()
self._shutdown()
class ByteIntervalDFN(DataflowNode):
"""Only transmit a specified byte interval (where input/output is in text strings)."""
def __init__(self, interval):
"""Only transmit bytes whose position in the stream falls in the given
interval from input to output. -1 for the end of the interval means
no limit."""
DataflowNode.__init__(self)
self.interval = interval
self.byteNum = 0
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, one output
def input(self, record, inputLink=0):
strlen = len(record)
# Map the byte interval into the string coords
# Limit by string boundaries
startInStr = self.interval[0] - self.byteNum
startInStr = min(strlen, max(0, startInStr))
endInStr = self.interval[1] - self.byteNum if self.interval[1] != -1 else strlen
endInStr = min(strlen, max(0, endInStr))
self.byteNum += len(record)
if endInStr - startInStr > 0:
self._output(record[startInStr:endInStr])
if self.interval[1] != -1 and self.byteNum > self.interval[1]:
self.eos()
self._shutdown()
class BatchRecordDFN(DataflowNode):
"""Pass on records input->output in batches. A batchsize of 0 means to
wait until end of stream."""
def __init__(self, batchsize):
DataflowNode.__init__(self)
self.batchsize = batchsize
self.recordlist = []
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, one output
def _push(self):
self._output(self.recordlist)
self.recordlist = []
def input(self, record, inputLink=0):
self.recordlist += (record,)
if self.batchsize and len(self.recordlist) >= self.batchsize:
self._push()
def _localEos(self):
if self.recordlist: self._push()
| gpl-2.0 | -3,249,355,095,485,375,000 | 37.487889 | 90 | 0.636879 | false |
guileschool/BEAGLEBONE-tutorials | BBB-firmware/u-boot-v2018.05-rc2/test/py/tests/test_log.py | 1 | 3623 | # Copyright (c) 2016, Google Inc.
#
# SPDX-License-Identifier: GPL-2.0+
#
# U-Boot Verified Boot Test
"""
This tests U-Boot logging. It uses the 'log test' command with various options
and checks that the output is correct.
"""
import pytest
LOGL_FIRST, LOGL_WARNING, LOGL_INFO = (0, 4, 6)
@pytest.mark.buildconfigspec('log')
def test_log(u_boot_console):
"""Test that U-Boot logging works correctly."""
def check_log_entries(lines, mask, max_level=LOGL_INFO):
"""Check that the expected log records appear in the output
Args:
lines: iterator containing lines to check
mask: bit mask to select which lines to check for:
bit 0: standard log line
bit 1: _log line
max_level: maximum log level to expect in the output
"""
for i in range(max_level):
if mask & 1:
assert 'log_run() log %d' % i == lines.next()
if mask & 3:
assert 'func() _log %d' % i == lines.next()
def run_test(testnum):
"""Run a particular test number (the 'log test' command)
Args:
testnum: Test number to run
Returns:
iterator containing the lines output from the command
"""
with cons.log.section('basic'):
output = u_boot_console.run_command('log test %d' % testnum)
split = output.replace('\r', '').splitlines()
lines = iter(split)
assert 'test %d' % testnum == lines.next()
return lines
def test0():
lines = run_test(0)
check_log_entries(lines, 3)
def test1():
lines = run_test(1)
check_log_entries(lines, 3)
def test2():
lines = run_test(2)
def test3():
lines = run_test(3)
check_log_entries(lines, 2)
def test4():
lines = run_test(4)
assert next(lines, None) == None
def test5():
lines = run_test(5)
check_log_entries(lines, 2)
def test6():
lines = run_test(6)
check_log_entries(lines, 3)
def test7():
lines = run_test(7)
check_log_entries(lines, 3, LOGL_WARNING)
def test8():
lines = run_test(8)
check_log_entries(lines, 3)
def test9():
lines = run_test(9)
check_log_entries(lines, 3)
# TODO([email protected]): Consider structuring this as separate tests
cons = u_boot_console
test0()
test1()
test2()
test3()
test4()
test5()
test6()
test7()
test8()
test9()
@pytest.mark.buildconfigspec('log')
def test_log_format(u_boot_console):
"""Test the 'log format' and 'log rec' commands"""
def run_with_format(fmt, expected_output):
"""Set up the log format and then write a log record
Args:
fmt: Format to use for 'log format'
expected_output: Expected output from the 'log rec' command
"""
output = cons.run_command('log format %s' % fmt)
assert output == ''
output = cons.run_command('log rec arch notice file.c 123 func msg')
assert output == expected_output
cons = u_boot_console
with cons.log.section('format'):
run_with_format('all', 'NOTICE.arch,file.c:123-func() msg')
output = cons.run_command('log format')
assert output == 'Log format: clFLfm'
run_with_format('fm', 'func() msg')
run_with_format('clfm', 'NOTICE.arch,func() msg')
run_with_format('FLfm', 'file.c:123-func() msg')
run_with_format('lm', 'NOTICE. msg')
run_with_format('m', 'msg')
| mit | 7,526,113,073,815,767,000 | 27.527559 | 78 | 0.568038 | false |
czervenka/gapi | gapi/api/bigquery.py | 1 | 3051 | # Copyright 2013 Lukas Lukovsky <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from gapi.exceptions import GoogleApiHttpException
from ..client import ApiService, ApiResource
class Service(ApiService):
_base_url = 'https://www.googleapis.com/bigquery/v2'
_default_scope = 'https://www.googleapis.com/auth/bigquery'
@property
def _resources(self):
return [DataSets, Jobs, JobsQueries, Tables]
ApiService._services['bigquery'] = Service
class DataSets(ApiResource):
project_id = None
_name = 'datasets'
_methods = ['list']
@property
def _base_path(self):
return '/projects/%s/datasets' % self.project_id
class Jobs(ApiResource):
project_id = None
_name = 'jobs'
_methods = 'get', 'insert', 'list'
@property
def _base_path(self):
return '/projects/%s/jobs' % self.project_id
class JobsQueries(ApiResource):
project_id = None
_name = 'jobs_queries'
_methods = 'getQueryResults', 'query'
@property
def _base_path(self):
return '/projects/%s/queries' % self.project_id
def _api_getQueryResults(self, id, **kwargs):
return ApiResource._api_get(self, id, method='GET', params=kwargs)
def _api_query(self, query, **kwargs):
return self._service.fetch(self._get_item_url({}), method='POST', payload=query, params=kwargs)
class Tables(ApiResource):
project_id = None
_name = 'tables'
_methods = 'get', 'update', 'insert_all'
@property
def _base_path(self):
return '/projects/%s' % self.project_id
def _get_item_url(self, dataset_id, table_id):
return '%s/datasets/%s/tables/%s' % (self._base_url, dataset_id, table_id)
def _api_get(self, dataset_id, table_id, **kwargs):
return self._service.fetch(
self._get_item_url(dataset_id, table_id), method='GET', params=kwargs)
def _api_update(self, dataset_id, table_id, body, **kwargs):
return self._service.fetch(
self._get_item_url(dataset_id, table_id), method='PUT', payload=body, params=kwargs)
def _api_insert_all(self, dataset_id, table_id, rows, **kwargs):
body = dict()
body['kind'] = "bigquery#tableDataInsertAllRequest"
body['rows'] = rows
res = self._service.fetch(
self._get_item_url(dataset_id, table_id) + "/insertAll", method='POST', payload=body, params=kwargs)
if 'insertErrors' in res:
raise GoogleApiHttpException(res['insertErrors'])
return res
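# Example (sketch, not part of the original module): streaming rows into a
# table, assuming the ApiService base class exposes each resource under its
# _name and each _api_* method without the prefix; ids below are placeholders.
#
#   tables = service.tables
#   tables.project_id = 'my-project'
#   tables.insert_all('my_dataset', 'my_table',
#                     [{'json': {'name': 'alice', 'score': 10}}])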
| apache-2.0 | 6,696,008,212,205,719,000 | 29.51 | 112 | 0.656178 | false |
felipenaselva/felipe.repository | script.module.resolveurl/lib/resolveurl/plugins/watchers.py | 1 | 1793 | """
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
resolveurl XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class WatchersResolver(ResolveUrl):
name = "watchers"
domains = ['watchers.to']
pattern = '(?://|\.)(watchers\.to)/(?:embed-)?([a-zA-Z0-9]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.RAND_UA}
html = self.net.http_GET(web_url, headers=headers).content
if html:
packed = helpers.get_packed_data(html)
headers.update({'Referer': web_url})
sources = helpers.parse_sources_list(packed)
if sources: return helpers.pick_source(sources) + helpers.append_headers(headers)
raise ResolverError('File not found')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
| gpl-2.0 | 2,952,388,391,311,143,000 | 36.354167 | 93 | 0.660904 | false |
westerncapelabs/django-grs-gatewaycms | quiz/migrations/0001_initial.py | 1 | 8276 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Quiz'
db.create_table(u'quiz_quiz', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
('description', self.gf('django.db.models.fields.CharField')(max_length=163)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'quiz', ['Quiz'])
# Adding model 'Question'
db.create_table(u'quiz_question', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('quiz_id', self.gf('django.db.models.fields.related.ForeignKey')(related_name='q_quiz_id', to=orm['quiz.Quiz'])),
('question', self.gf('django.db.models.fields.CharField')(max_length=163)),
))
db.send_create_signal(u'quiz', ['Question'])
# Adding model 'Answer'
db.create_table(u'quiz_answer', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question_id', self.gf('django.db.models.fields.related.ForeignKey')(related_name='question_id', to=orm['quiz.Question'])),
('answer', self.gf('django.db.models.fields.CharField')(max_length=156)),
('response', self.gf('django.db.models.fields.CharField')(max_length=156)),
('correct', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'quiz', ['Answer'])
# Adding model 'FinalResponse'
db.create_table(u'quiz_finalresponse', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('quiz_id', self.gf('django.db.models.fields.related.ForeignKey')(related_name='fr_quiz_id', to=orm['quiz.Quiz'])),
('text', self.gf('django.db.models.fields.CharField')(max_length=180)),
('sms', self.gf('django.db.models.fields.CharField')(max_length=160)),
('for_total', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'quiz', ['FinalResponse'])
def backwards(self, orm):
# Deleting model 'Quiz'
db.delete_table(u'quiz_quiz')
# Deleting model 'Question'
db.delete_table(u'quiz_question')
# Deleting model 'Answer'
db.delete_table(u'quiz_answer')
# Deleting model 'FinalResponse'
db.delete_table(u'quiz_finalresponse')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'quiz.answer': {
'Meta': {'object_name': 'Answer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '156'}),
'correct': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_id'", 'to': u"orm['quiz.Question']"}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '156'})
},
u'quiz.finalresponse': {
'Meta': {'object_name': 'FinalResponse'},
'for_total': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quiz_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fr_quiz_id'", 'to': u"orm['quiz.Quiz']"}),
'sms': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '180'})
},
u'quiz.question': {
'Meta': {'object_name': 'Question'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '163'}),
'quiz_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'q_quiz_id'", 'to': u"orm['quiz.Quiz']"})
},
u'quiz.quiz': {
'Meta': {'object_name': 'Quiz'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '163'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['quiz'] | mit | -552,876,124,836,205,300 | 60.311111 | 187 | 0.562953 | false |
mtils/ems | ems/qt/graphics/selection_rect.py | 1 | 3018 |
from ems.qt import QtWidgets, QtGui, QtCore
QGraphicsItem = QtWidgets.QGraphicsItem
QGraphicsObject = QtWidgets.QGraphicsObject
QObject = QtCore.QObject
pyqtProperty = QtCore.pyqtProperty
QStyle = QtWidgets.QStyle
QBrush = QtGui.QBrush
QColor = QtGui.QColor
QRectF = QtCore.QRectF
Qt = QtCore.Qt
QEvent = QtCore.QEvent
class SelectionRect(QGraphicsObject):
def __init__(self, parent=None):
super(SelectionRect, self).__init__(parent)
self._target = None
self._visible = None
self._margin = 10.0
self.setFlags(QGraphicsItem.ItemIsSelectable|
QGraphicsItem.ItemIsMovable)
def getTarget(self):
return self._target
def setTarget(self, target):
if self._target is target or target is self:
return
if self._target:
self._target.removeSceneEventFilter(self)
self._target = target
if self.scene() is not self._target.scene():
self._target.scene().addItem(self)
#self._target.positionChanged += self._moveWithTarget
self._target.installSceneEventFilter(self)
self.setPos(self._target.pos())
self.setZValue(self._target.zValue()-1)
target = pyqtProperty(QGraphicsItem, getTarget, setTarget)
def boundingRect(self):
if not self._target:
return QRectF()
targetRect = self._target.boundingRect()
myRect = QRectF(targetRect.topLeft(), targetRect.size())
myRect.setWidth(targetRect.width() + self._margin + self._margin)
myRect.setHeight(targetRect.height() + self._margin + self._margin)
#myRect.moveLeft(self._margin)
myRect.moveTo(targetRect.x() - self._margin, targetRect.y() - self._margin)
return myRect
def paint(self, painter, option, widget=None):
#super(TextItem, self).paint(painter, option, widget)
#if not (option.state & QStyle.State_Selected):
#return
rect = self.boundingRect()
innerRect = self._target.boundingRect()
#w = rect.width()
#h = rect.height()
#s = 4
brush = QBrush(QColor(128,179,255))
#painter.setPen(Qt.NoPen)
brush.setStyle(Qt.NoBrush)
painter.setBrush(brush)
#painter.setColor(QColor(128,179,255))
painter.drawRect(innerRect)
painter.drawRect(rect)
#painter.fillRect(0, 0, s, s, brush);
#painter.fillRect(0, 0 + h - s, s, s, brush);
#painter.fillRect(0 + w - s, 0, s, s, brush);
def mouseMoveEvent(self, event):
super(SelectionRect, self).mouseMoveEvent(event)
self._target.setPos(self.pos())
    def sceneEventFilter(self, watched, event):
        # Event filtering is currently disabled by this early return; the
        # mouse-move redirect below is kept for reference only.
        return False
print("event", event.type())
# Redirect Mouse move to self
if event.type() != QEvent.GraphicsSceneMouseMove:
return False
self.mouseMoveEvent(event)
return True
def _moveWithTarget(self, position):
self.setPos(position)
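# Example (sketch, not part of the original module): attaching the selection
# rectangle to an item that already belongs to a QGraphicsScene; "item" is a
# placeholder for any selectable QGraphicsItem.
#
#   rect = SelectionRect()
#   rect.target = item   # adds the rect to the item's scene and tracks its position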
| mit | 349,490,669,558,714,940 | 31.804348 | 83 | 0.629225 | false |
jarifibrahim/ashoka-dashboard | dashboard/migrations/0018_auto_20170124_2054.py | 1 | 4825 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-24 15:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0017_auto_20170110_0012'),
]
operations = [
migrations.AlterModelOptions(
name='advisoryphase',
options={'verbose_name_plural': 'Advisory Phases (Optional)'},
),
migrations.AlterModelOptions(
name='consultantsurvey',
options={'verbose_name': 'Consultant Survey (Optional)', 'verbose_name_plural': 'Consultant Surveys (Optional)'},
),
migrations.AlterModelOptions(
name='fellowsurvey',
options={'verbose_name': 'Fellow Survey (Optional)', 'verbose_name_plural': 'Fellow Surveys (Optional)'},
),
migrations.AlterModelOptions(
name='member',
options={'verbose_name': 'Member', 'verbose_name_plural': 'Members'},
),
migrations.AlterModelOptions(
name='role',
options={'verbose_name_plural': 'Roles (Optional)'},
),
migrations.AlterModelOptions(
name='secondaryrole',
options={'verbose_name_plural': 'Secondary Roles (Optional)'},
),
migrations.AlterModelOptions(
name='team',
options={'verbose_name': 'Team', 'verbose_name_plural': 'Teams'},
),
migrations.AlterModelOptions(
name='teamstatus',
options={'verbose_name': 'Team status (Optional)', 'verbose_name_plural': 'Team status (Optional)'},
),
migrations.AlterModelOptions(
name='teamwarning',
options={'verbose_name': 'Team Warnings (Optional)', 'verbose_name_plural': 'Team Warnings (Optional)'},
),
migrations.AlterModelOptions(
name='weekwarning',
options={'verbose_name': 'Weekly Warnings (Optional)', 'verbose_name_plural': 'Weekly Warnings (Optional)'},
),
migrations.AddField(
model_name='teamwarning',
name='advisor_on',
field=models.CharField(choices=[('G', 'Green'), ('Y', 'Yellow'), ('R', 'Red')], default='G', max_length=3, verbose_name='Warning - Advisor Onboarding'),
),
migrations.AddField(
model_name='teamwarning',
name='advisor_on_comment',
field=models.CharField(blank=True, max_length=300, verbose_name='Comment - Advisory Onboarding'),
),
migrations.AddField(
model_name='teamwarning',
name='sys_vision',
field=models.CharField(choices=[('G', 'Green'), ('Y', 'Yellow'), ('R', 'Red')], default='G', max_length=3, verbose_name='Warning - Systemic Vision'),
),
migrations.AddField(
model_name='teamwarning',
name='sys_vision_comment',
field=models.CharField(blank=True, max_length=300, verbose_name='Comment - Systemic Vision'),
),
migrations.AddField(
model_name='weekwarning',
name='advisor_on_r',
field=models.BooleanField(default=False, help_text='Advisor Onboarding not happened in this week leads to Red warning.', verbose_name='Advisor Onboarding - Red warning'),
preserve_default=False,
),
migrations.AddField(
model_name='weekwarning',
name='advisor_on_y',
field=models.BooleanField(default=False, help_text='Advisor Onboarding not happened in this week leads to Yellow warning.', verbose_name='Advisor Onboarding - Yellow warning'),
preserve_default=False,
),
migrations.AddField(
model_name='weekwarning',
name='sys_vision_r',
field=models.BooleanField(default=False, help_text='Systemic Vision not happened in this week leads to Red warning', verbose_name='Systemic Vision - Red warning'),
preserve_default=False,
),
migrations.AddField(
model_name='weekwarning',
name='sys_vision_y',
field=models.BooleanField(default=False, help_text='Systemic Vision not happened in this week leads to Yellow warning', verbose_name='Systemic Vision - Yellow warning'),
preserve_default=False,
),
migrations.AlterField(
model_name='member',
name='secondary_role',
field=models.ManyToManyField(blank=True, related_name='secondary_role', to='dashboard.SecondaryRole'),
),
migrations.AlterField(
model_name='teamstatus',
name='advisor_onboarding_comment',
field=models.TextField(blank=True, verbose_name='Advisor Onboarding Comment'),
),
]
| apache-2.0 | 7,202,574,898,264,482,000 | 43.266055 | 188 | 0.594197 | false |
atalax/AsteroidOSLinux | asteroid/__init__.py | 1 | 7309 |
import argparse
import collections
import datetime
import functools
import itertools
import random
import struct
import time
import xml.etree.ElementTree
from asteroid import bleee
from gi.repository import GLib
def ensure_connected(fn):
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
# Note that this does not really strongly guarantee anything as the
# device can disconnect at any time
self.connect()
ret = fn(self, *args, **kwargs)
# Do we want to schedule a disconnect? Or is BLE low power enough?
return ret
return wrapper
class WeatherPredictions:
Prediction = collections.namedtuple("Prediction", ["id_", "min_", "max_"])
MAX_LEN = 5
def __init__(self, city_name):
self.city_name = city_name
self.values = []
def append_prediction(self, id_, min_, max_):
if len(self.values) >= WeatherPredictions.MAX_LEN:
raise ValueError("Maximum length exceeded")
self.values.append(WeatherPredictions.Prediction(
id_=id_, min_=min_, max_=max_))
@classmethod
def from_owm(class_, owmforecast):
        # We will get None if the name is not known
loc = owmforecast.get_location()
name = loc.get_name()
if not name:
name = "%.3f %.3f" % (loc.get_lat(), loc.get_lon())
ret = class_(name)
for x in range(WeatherPredictions.MAX_LEN):
w = owmforecast.get(x)
ret.append_prediction(w.get_weather_code(),
w.get_temperature()["min"],
w.get_temperature()["max"])
return ret
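# Illustrative sketch of filling a WeatherPredictions object by hand (the city
# name and values are made up; normally from_owm() builds it from an OWM
# forecast):
#
#   wp = WeatherPredictions("Oslo")
#   for _ in range(WeatherPredictions.MAX_LEN):
#       wp.append_prediction(800, 275.0, 283.0)  # OWM weather code, min, max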
class Asteroid:
UUID_BATTERY = "00002a19-0000-1000-8000-00805f9b34fb"
UUID_TIME = "00005001-0000-0000-0000-00a57e401d05"
UUID_SCREENSHOT_REQ = "00006001-0000-0000-0000-00a57e401d05"
UUID_SCREENSHOT_RESP = "00006002-0000-0000-0000-00a57e401d05"
UUID_NOTIF_UPD = "00009001-0000-0000-0000-00a57e401d05"
UUID_WEATHER_CITY = "00008001-0000-0000-0000-00a57e401d05"
UUID_WEATHER_IDS = "00008002-0000-0000-0000-00a57e401d05"
UUID_WEATHER_MINT = "00008003-0000-0000-0000-00a57e401d05"
UUID_WEATHER_MAXT = "00008004-0000-0000-0000-00a57e401d05"
UUID_MEDIA_TITLE = "00007001-0000-0000-0000-00a57e401d05"
UUID_MEDIA_ALBUM = "00007002-0000-0000-0000-00a57e401d05"
UUID_MEDIA_ARTIST = "00007003-0000-0000-0000-00a57e401d05"
UUID_MEDIA_PLAY = "00007004-0000-0000-0000-00a57e401d05"
UUID_MEDIA_COMM = "00007005-0000-0000-0000-00a57e401d05"
MEDIA_COMMAND_PREVIOUS = 0x0
MEDIA_COMMAND_NEXT = 0x1
MEDIA_COMMAND_PLAY = 0x2
MEDIA_COMMAND_PAUSE = 0x3
def __init__(self, address):
self.ble = bleee.BLE()
self.address = address
self.dev = self.ble.device_by_address(self.address)
self.disconnect_timeout = None
self._disconnect_id = None
def connect(self):
# We also want to wait until services are resolved
while not self.dev.connected or not self.dev.services_resolved:
if not self.dev.connected:
try:
# Problematically, dbus calls block the entire event loop
# TODO: Fix this
self.dev.connect()
except GLib.GError:
# Just ignore everything for now
pass
else:
time.sleep(0.1)
@ensure_connected
def battery_level(self):
return self.dev.char_by_uuid(Asteroid.UUID_BATTERY).read()[0]
@ensure_connected
def update_time(self, to=None):
if to is None:
to = datetime.datetime.now()
data = [
to.year - 1900,
to.month - 1,
to.day,
to.hour,
to.minute,
to.second
]
self.dev.char_by_uuid(Asteroid.UUID_TIME).write(data)
@ensure_connected
def screenshot(self):
# TODO: This disconnects after a few callbacks, fix
crsp = self.dev.char_by_uuid(Asteroid.UUID_SCREENSHOT_RESP)
loop = GLib.MainLoop()
data_rem = None
def cb(*args):
print(args)
#loop.quit()
crsp.start_notify()
crsp.properties_changed.connect(cb)
self.dev.char_by_uuid(Asteroid.UUID_SCREENSHOT_REQ).write(b"\x00")
loop.run()
@ensure_connected
def notify(self, summary, body=None, id_=None, package_name=None,
app_name=None, app_icon=None):
if id_ is None:
id_ = random.randint(0, 2 ** 31)
id_ = str(id_)
xinsert = xml.etree.ElementTree.Element("insert")
for vl, xn in ((summary, "su"),
(body, "bo"),
(id_, "id"),
(package_name, "pn"),
(app_name, "an"),
(app_icon, "ai")):
if vl is not None:
xel = xml.etree.ElementTree.SubElement(xinsert, xn)
xel.text = vl
data = xml.etree.ElementTree.tostring(xinsert)
self.dev.char_by_uuid(Asteroid.UUID_NOTIF_UPD).write(data)
return id_
@ensure_connected
def update_weather(self, predictions):
# Set city name
self.dev.char_by_uuid(Asteroid.UUID_WEATHER_CITY).write(
predictions.city_name.encode())
self.dev.char_by_uuid(Asteroid.UUID_WEATHER_IDS).write(
struct.pack(">5H", *[round(p.id_) for p in predictions.values]))
self.dev.char_by_uuid(Asteroid.UUID_WEATHER_MINT).write(
struct.pack(">5H", *[round(p.min_) for p in predictions.values]))
self.dev.char_by_uuid(Asteroid.UUID_WEATHER_MAXT).write(
struct.pack(">5H", *[round(p.max_) for p in predictions.values]))
def update_media(self, title, album, artist, playing):
self.dev.char_by_uuid(Asteroid.UUID_MEDIA_TITLE).write(title.encode())
self.dev.char_by_uuid(Asteroid.UUID_MEDIA_ALBUM).write(album.encode())
self.dev.char_by_uuid(Asteroid.UUID_MEDIA_ARTIST).write(artist.encode())
self.dev.char_by_uuid(Asteroid.UUID_MEDIA_PLAY).write(
b"\x01" if playing else b"\x00")
def register_media_listener(self, fn):
# TODO: A way to unregister
ccomm = self.dev.char_by_uuid(Asteroid.UUID_MEDIA_COMM)
def cb(name, vals, lst):
if not "Value" in vals:
return
fn(vals["Value"][0])
ccomm.properties_changed.connect(cb)
ccomm.start_notify()
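# Rough usage sketch (the BLE address is a placeholder for a paired AsteroidOS
# watch; each call blocks until the device is reachable):
#
#   watch = Asteroid("00:11:22:33:44:55")
#   print(watch.battery_level())
#   watch.update_time()
#   watch.notify("Hello", body="Sent from the host")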
class DBusEavesdropper:
def __init__(self, bus, interface, member, callback):
self.bus = bus
self.interface = interface
self.member = member
self.callback = callback
self._dbus_ctl = self.bus.get("org.freedesktop.DBus")
# TODO: Escaping
# TODO: We probably want to unregister when destroyed?
self._match_id = self._dbus_ctl.AddMatch(
"interface=%s,member=%s,eavesdrop=true" %
(interface, member))
self.bus.con.add_filter(self._filter_func)
def _filter_func(self, con, msg, bl):
if msg.get_interface() == self.interface and \
msg.get_member() == self.member:
self.callback(msg)
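# Sketch of eavesdropping on desktop notifications (assumes `bus` is a
# pydbus-style session bus object, which is what the rest of this package
# passes in):
#
#   DBusEavesdropper(bus, "org.freedesktop.Notifications", "Notify", print)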
| mit | 6,464,442,055,945,354,000 | 35.183168 | 80 | 0.586674 | false |
pypa/setuptools | setuptools/tests/test_msvc14.py | 1 | 2825 | # -*- coding: utf-8 -*-
"""
Tests for msvc support module (msvc14 unit tests).
"""
import os
from distutils.errors import DistutilsPlatformError
import pytest
import sys
@pytest.mark.skipif(sys.platform != "win32",
reason="These tests are only for win32")
class TestMSVC14:
"""Python 3.8 "distutils/tests/test_msvccompiler.py" backport"""
def test_no_compiler(self):
import setuptools.msvc as _msvccompiler
# makes sure query_vcvarsall raises
# a DistutilsPlatformError if the compiler
# is not found
def _find_vcvarsall(plat_spec):
return None, None
old_find_vcvarsall = _msvccompiler._msvc14_find_vcvarsall
_msvccompiler._msvc14_find_vcvarsall = _find_vcvarsall
try:
pytest.raises(DistutilsPlatformError,
_msvccompiler._msvc14_get_vc_env,
'wont find this version')
finally:
_msvccompiler._msvc14_find_vcvarsall = old_find_vcvarsall
def test_get_vc_env_unicode(self):
import setuptools.msvc as _msvccompiler
test_var = 'ṰḖṤṪ┅ṼẨṜ'
test_value = '₃⁴₅'
# Ensure we don't early exit from _get_vc_env
old_distutils_use_sdk = os.environ.pop('DISTUTILS_USE_SDK', None)
os.environ[test_var] = test_value
try:
env = _msvccompiler._msvc14_get_vc_env('x86')
assert test_var.lower() in env
assert test_value == env[test_var.lower()]
finally:
os.environ.pop(test_var)
if old_distutils_use_sdk:
os.environ['DISTUTILS_USE_SDK'] = old_distutils_use_sdk
def test_get_vc2017(self):
import setuptools.msvc as _msvccompiler
# This function cannot be mocked, so pass it if we find VS 2017
# and mark it skipped if we do not.
version, path = _msvccompiler._msvc14_find_vc2017()
if os.environ.get('APPVEYOR_BUILD_WORKER_IMAGE', '') in [
'Visual Studio 2017'
]:
assert version
if version:
assert version >= 15
assert os.path.isdir(path)
else:
pytest.skip("VS 2017 is not installed")
def test_get_vc2015(self):
import setuptools.msvc as _msvccompiler
# This function cannot be mocked, so pass it if we find VS 2015
# and mark it skipped if we do not.
version, path = _msvccompiler._msvc14_find_vc2015()
if os.environ.get('APPVEYOR_BUILD_WORKER_IMAGE', '') in [
'Visual Studio 2015', 'Visual Studio 2017'
]:
assert version
if version:
assert version >= 14
assert os.path.isdir(path)
else:
pytest.skip("VS 2015 is not installed")
| mit | 7,649,118,458,199,482,000 | 33.182927 | 73 | 0.592223 | false |
openstack/manila-ui | manila_ui/dashboards/project/share_snapshots/tabs.py | 1 | 1082 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
class ShareSnapshotOverviewTab(tabs.Tab):
name = _("Share Snapshot Overview")
slug = "share_snapshot_overview_tab"
template_name = "project/share_snapshots/_detail.html"
def get_context_data(self, request):
return {"snapshot": self.tab_group.kwargs['snapshot']}
class ShareSnapshotDetailTabs(tabs.TabGroup):
slug = "share_snapshot_details"
tabs = (
ShareSnapshotOverviewTab,
)
| apache-2.0 | -1,347,417,031,697,469,000 | 33.903226 | 78 | 0.716266 | false |
HPENetworking/topology_lib_sflowtool | lib/topology_lib_sflowtool/parser.py | 1 | 9867 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Parse sflowtool commands with output to a Python dictionary.
"""
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
from re import search, findall, DOTALL, match
from logging import getLogger
from collections import OrderedDict
log = getLogger(__name__)
def parse_pid(response):
"""
Parse PID shell output using a regular expression.
:param str response: Output of a shell forking a subprocess.
"""
assert response
pid_regex = r'\[\d*\]\s+(?P<pid>\d+)'
regex_result = search(pid_regex, response)
if not regex_result:
log.debug('Failed to parse pid from:\n{}'.format(response))
raise Exception('PID regular expression didn\'t match.')
return int(regex_result.groupdict()['pid'])
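# Example sketch: for shell output such as "[1] 30127" (a backgrounded
# process), parse_pid returns the integer PID, here 30127.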
def parse_sflowtool(raw_output, mode):
"""
Parse the sflowtool output command raw output.
:param str raw_output: bash raw result string.
:rtype: dict
:return: In the line mode, all sflow packets seen at the collector parsed
in the form:
::
{
'flow_count':10
'sample_count':5
'packets':[
{
'packet_type':'FLOW',
'agent_address':'10.10.11.1',
'in_port':8,
....(fields in FLOW packet)
},
{
'packet_type':'CNTR',
'agent_address':'10.10.11.1',
'if_index':2,
....(fields in CNTR packet)
}
]
}
In the detail mode, all sflow packets seen at the collector parsed
in the form:
::
{
'datagrams':
[
{
'datagramSourceIP':'10.10.12.1',
'datagramSize':'924',
'unixSecondsUTC':'1473185811',
....(fields in datagram packet)
'samples':
[
{
'sampleType_tag':'0:1'
'sampleType':'FLOWSAMPLE'
'headerLen':'64'
....(fields in sample)
},
{
'sampleType_tag':'0:2'
'sampleType':'COUNTERSSAMPLE'
'sampleSequenceNo':'1'
....(fields in sample)
},
....(all the samples captured in the datagram)
'cntr_samples': 1,
'flow_samples': 1,
]
},
{
'datagramSourceIP':'10.10.12.1',
'datagramSize':'924',
'unixSecondsUTC':'1473185811',
....(fields in datagram packet)
'samples':
[
{
'sampleType_tag':'0:1'
'sampleType':'FLOWSAMPLE'
'headerLen':'64'
....(fields in sample)
},
{
'sampleType_tag':'0:2'
'sampleType':'COUNTERSSAMPLE'
'sampleSequenceNo':'2'
....(fields in sample)
},
....(all the samples captured in the datagram)
'cntr_samples': 1,
'flow_samples': 1
]
},
....(all the datagrams captured)
]
'number_of_datagrams': 2
}
"""
if mode == 'line':
        # Refer to https://github.com/sflow/sflowtool for the fields below
flow_packet_fields = ['packet_type', 'agent_address', 'in_port',
'out_port', 'src_mac', 'dst_mac',
'eth_type', 'in_vlan', 'out_vlan',
'src_ip', 'dst_ip', 'ip_protocol', 'ip_tos',
'ip_ttl', 'icmp_type', 'icmp_code', 'tcp_flags',
'packet_size', 'ip_size', 'sampling_rate']
sample_packet_fields = ['packet_type', 'agent_address', 'if_index',
'if_type', 'if_speed', 'if_direction',
'if_status', 'in_octets', 'in_ucastPkts',
'in_mcastPkts', 'in_bcastPkts', 'in_discards',
'in_errors', 'in_unknownProtos', 'out_octets',
'out_ucastPkts', 'out_mcastPkts',
'out_bcastPkts', 'out_discards', 'out_errors',
'if_promiscuousMode']
output = raw_output.splitlines()
flow_count = 0
sample_count = 0
result = {}
packets = []
for line in output:
packet = {} # sFlow packet information
sflow_packet = line.split(",")
if sflow_packet[0] == 'FLOW':
assert len(sflow_packet) == len(flow_packet_fields)
for field in range(len(sflow_packet)):
packet[flow_packet_fields[field]] = sflow_packet[field]
flow_count = flow_count + 1
packets.append(packet)
elif sflow_packet[0] == 'CNTR':
assert len(sflow_packet) == len(sample_packet_fields)
for field in range(len(sflow_packet)):
packet[sample_packet_fields[field]] = sflow_packet[field]
sample_count = sample_count + 1
packets.append(packet)
result['flow_count'] = flow_count
result['sample_count'] = sample_count
result['packets'] = packets
return result
elif mode == 'detail':
result = {}
result['datagrams'] = []
result['number_of_datagrams'] = 0
# Strings to be used while matching datagrams and samples
# in the output from sflowtool
start_datagram = 'startDatagram =================================\n'
end_datagram = 'endDatagram =================================\n'
start_sample = 'startSample ----------------------\n'
end_sample = 'endSample ----------------------\n'
# Regex string for identifying start/end of datagrams & samples
finder = r'{}(.*?){}'
# Regex to parse datagram attributes
datagram_info_re = (
r'datagramSourceIP\s(?P<datagramSourceIP>.+)\s'
r'datagramSize\s(?P<datagramSize>.+)\s'
r'unixSecondsUTC\s(?P<unixSecondsUTC>.+)\s'
r'datagramVersion\s(?P<datagramVersion>.+)\s'
r'agentSubId\s(?P<agentSubId>.+)\s'
r'agent\s(?P<agent>.+)\s'
r'packetSequenceNo\s(?P<packetSequenceNo>.+)\s'
r'sysUpTime\s(?P<sysUpTime>.+)\s'
r'samplesInPacket\s(?P<samplesInPacket>\d+)\s'
)
# Regex for matching attributes inside a sample
attribute_re = '((.+) (.+))'
# Make a list of datagrams from the sflowtool raw output
datagrams = findall(
finder.format(start_datagram, end_datagram), raw_output, DOTALL)
for datagram in datagrams:
# Get the datagram specific attributes and form a dict
re_result = match(datagram_info_re, datagram, DOTALL)
datagram_dict = re_result.groupdict()
# Initialize sample specific data inside the datagram_dict
datagram_dict['samples'] = []
datagram_dict['flow_samples'] = 0
datagram_dict['cntr_samples'] = 0
# Get list of samples from within the datagram
samples = findall(
finder.format(start_sample, end_sample), datagram, DOTALL)
for sample in samples:
sample_lines = sample.splitlines()
sample_dict = {}
# Match the attributes of each sample and populate
# into the sample_dict
for sample_line in sample_lines:
attribute = match(attribute_re, sample_line)
sample_dict[attribute.group(2)] = attribute.group(3)
# Add the sample to the list of samples under the datagram
datagram_dict['samples'].append(sample_dict)
# Increment respective counters based on type of sample
if sample_dict['sampleType'] == 'FLOWSAMPLE':
datagram_dict['flow_samples'] += 1
elif sample_dict['sampleType'] == 'COUNTERSSAMPLE':
datagram_dict['cntr_samples'] += 1
# Add the parsed datagram to result and increment count
# of datagrams
result['datagrams'].append(datagram_dict)
result['number_of_datagrams'] += 1
return result
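# Minimal usage sketch (the single FLOW record below is fabricated, but it has
# the 20 comma-separated fields expected in line mode):
#
#   raw = ("FLOW,10.10.11.1,8,7,00:11:22:33:44:55,66:77:88:99:aa:bb,0x0800,"
#          "1,1,10.0.0.1,10.0.0.2,6,0x00,64,0,0,0x10,100,86,512")
#   parsed = parse_sflowtool(raw, mode='line')
#   # parsed['flow_count'] == 1 and parsed['packets'][0]['src_ip'] == '10.0.0.1'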
__all__ = [
'parse_sflowtool'
]
| apache-2.0 | -8,103,275,785,004,340,000 | 35.275735 | 78 | 0.480085 | false |
bbengfort/cloudscope | tests/test_utils/test_statistics.py | 1 | 23826 | # tests.test_utils.test_statistics
# Testing for the statistics utility module.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Tue Aug 23 13:40:49 2016 -0400
#
# Copyright (C) 2016 University of Maryland
# For license information, see LICENSE.txt
#
# ID: test_statistics.py [] [email protected] $
"""
Testing for the statistics utility module.
"""
##########################################################################
## Imports
##########################################################################
import unittest
from itertools import product
from cloudscope.utils.statistics import *
##########################################################################
## Fixtures
##########################################################################
# Empty test case
EMPTY = []
# Float test cases
FLOATS = [
# Uniform random [0.0, 1.0)
[ 0.43730873, 0.66860239, 0.34969353, 0.64048078, 0.06388402,
0.27340017, 0.77561069, 0.0469865 , 0.00952501, 0.905221 ,
0.85934168, 0.81024019, 0.06976906, 0.54518943, 0.27677394,
0.12399665, 0.43676722, 0.5155873 , 0.38699299, 0.76616917,
0.02502538, 0.40382399, 0.99043387, 0.71853195, 0.42132248,
0.23079655, 0.12753139, 0.72196278, 0.63156918, 0.58127711,
0.323632 , 0.75723769, 0.55014024, 0.48530899, 0.81193682,
0.63641341, 0.9896141 , 0.59410421, 0.08999124, 0.44973318,
0.20534478, 0.35249505, 0.68384246, 0.10588445, 0.81486703,
0.82123886, 0.23312338, 0.29706749, 0.95132877, 0.53760118,
0.52506907, 0.18586977, 0.10429846, 0.37754277, 0.80177148,
0.8923954 , 0.01853723, 0.32609851, 0.83528495, 0.59851704,
0.94780306, 0.00333868, 0.64453206, 0.68733814, 0.69465826,
0.17311021, 0.81104648, 0.36074105, 0.86521824, 0.57384475,
0.54296227, 0.95244882, 0.4340912 , 0.79415668, 0.36713392,
0.01035679, 0.37757458, 0.86641362, 0.24478224, 0.48594984,
0.16053626, 0.4092285 , 0.52627802, 0.12932203, 0.49634128,
0.69001666, 0.62750143, 0.22644635, 0.61771225, 0.26848362,
0.38573517, 0.82619298, 0.4761255 , 0.60803911, 0.25304987,
0.30113422, 0.57631252, 0.66860624, 0.23604634, 0.21473307 ],
# Uniform random [0.0, 100.0]
[ 22.20520866, 17.17258577, 8.49659732, 13.95346708,
55.55125426, 8.80317998, 24.68324592, 96.84491714,
22.72401521, 73.64288806, 42.17152252, 14.37810073,
34.24014255, 60.81097632, 59.87367563, 52.2406963 ,
6.49507369, 39.25094041, 72.35007601, 94.3952359 ,
28.06879455, 39.47692788, 13.88718282, 6.97516371,
66.55848707, 31.92083665, 25.32500032, 56.42714507,
7.51769482, 28.60784098, 24.08155829, 89.91651969,
47.86004113, 71.85761032, 82.4294561 , 68.91388351,
4.44603844, 42.60146732, 64.99026944, 11.28079872,
89.95259469, 21.62473926, 40.67768745, 74.03776227,
47.28452248, 42.43533983, 4.54809125, 64.33809063,
0.48360149, 53.58079114, 71.05946081, 68.42234587,
70.6191961 , 89.55513029, 23.68983622, 46.13571428,
95.80340964, 31.05251035, 18.16043837, 85.30237868,
34.85336423, 85.13625608, 33.24675386, 65.98221573,
43.41008904, 1.41689122, 25.66436842, 97.83154993,
28.95244763, 58.6737343 , 31.11425024, 39.89891167,
18.87817841, 63.74118985, 7.34593289, 11.56234643,
70.74670506, 94.08037005, 20.42316914, 46.72559006,
41.20221363, 81.16258525, 83.10004094, 22.6069545 ,
46.25125172, 19.02403741, 41.4161593 , 17.98574115,
83.66625755, 66.30583531, 74.77229409, 81.07751229,
6.00914795, 18.30008296, 37.99743388, 23.08334708,
36.67738259, 28.58053073, 76.88689287, 88.09260102 ],
# Normal random gaussian distribution with mean=0 and sigma=1
[ 1.36628297, -0.95063669, -0.7409544 , 0.49168896, 0.64369943,
-1.36683641, -0.85050743, 0.14056131, 0.40071956, 0.06894656,
-0.35099341, -0.94658349, -0.05191993, -0.68728832, 1.5290626 ,
-0.29765041, -1.47736747, 1.42462537, -0.93737476, -1.75120617,
0.37956676, -0.41298492, 1.26101492, -1.11693991, 0.86228129,
1.25771588, -1.698729 , -0.34431668, -0.34907691, 1.52828139,
-1.65994198, -1.22920884, -1.416939 , 0.4581475 , 0.25962794,
-1.10938565, -2.01038612, -0.89623881, -0.02473882, -0.10925982,
1.49019119, -0.71829783, -0.57516934, 1.31854532, 0.64051439,
-0.539811 , -0.36544998, 0.34168854, 1.03403893, 0.1788948 ,
0.3961166 , -2.04685416, -0.50117633, 0.72760044, -1.23274552,
-0.34341958, -0.75571399, 1.39371562, -0.01919108, -0.17840926,
0.27388972, 0.81694269, 0.19208915, -0.90984528, -0.43602705,
-1.9333356 , -0.82054677, -0.22563851, 0.38139457, 0.35015976,
0.70850311, -0.24979133, -0.83115026, -0.22170927, -1.47006649,
-1.42263061, 2.67703557, -0.4531137 , -0.01348267, -0.1477644 ,
-0.59528241, -0.99513121, -0.19154543, 1.3695901 , -0.40227537,
1.37180334, 0.52361872, -0.09802685, 1.70726494, -0.28957362,
2.12909179, 0.91799377, 0.75537678, 0.35040934, -0.20546863,
1.70405968, 0.96502427, 0.81638739, 1.88802825, 1.06889865 ],
# Normal random gaussian distribution with mean=100 and sigma=18
[ 97.15759554, 77.17442118, 121.65339951, 115.47590292,
83.13049538, 80.58906683, 133.28962059, 101.19894129,
102.23057183, 92.82933217, 96.243821 , 97.2783628 ,
99.62594213, 119.51777017, 141.770821 , 111.48806454,
87.59254024, 92.02257259, 87.26595797, 106.22640402,
120.33377392, 80.79363771, 85.66117399, 62.35418484,
135.02835057, 128.33176531, 105.24356978, 125.16042398,
84.50687617, 99.95500342, 102.14580588, 129.78867181,
130.95831888, 107.58328424, 78.38888971, 85.59218946,
132.50719329, 82.13758304, 100.1639717 , 80.05534368,
91.68220069, 86.70158004, 88.42494344, 92.22226738,
93.75785656, 95.36178327, 85.2791005 , 88.03325987,
100.78703198, 136.73102739, 92.70148723, 115.0907645 ,
120.05135927, 100.12796585, 70.13846055, 136.07669925,
97.44144139, 109.51705036, 99.31486862, 111.37134817,
78.430312 , 114.61626988, 103.06188281, 55.51858758,
104.55853914, 126.27837134, 91.4791138 , 91.74949264,
99.45526277, 60.83926795, 73.31706548, 109.1869211 ,
92.71997445, 92.29068272, 69.76686038, 96.69493926,
98.98031343, 105.2876436 , 124.74573867, 123.59994673,
114.99458381, 93.90003085, 89.68415181, 135.73404241,
112.74098956, 118.8758599 , 77.30905375, 83.08948144,
123.81454249, 95.22466886, 79.00660774, 87.19579895,
105.46176326, 110.65034971, 95.13515247, 62.24700869,
88.66132196, 90.00716862, 107.83890058, 97.73738434],
]
# Integer test cases
INTEGERS = [
# Between 1 and 10
[7, 6, 1, 1, 7, 3, 9, 5, 7, 6, 3, 1, 5, 3, 9, 5, 1, 8, 1, 2, 2, 2, 5,
2, 1, 6, 8, 5, 8, 4, 7, 8, 3, 3, 7, 4, 4, 4, 7, 5, 5, 9, 2, 6, 6, 6,
1, 4, 7, 1, 6, 6, 6, 5, 5, 6, 9, 5, 7, 6, 5, 6, 3, 8, 9, 9, 6, 6, 2,
1, 3, 1, 6, 2, 7, 4, 3, 6, 7, 3, 4, 2, 9, 2, 4, 4, 9, 5, 5, 5, 4, 7,
5, 4, 2, 4, 7, 3, 8, 1, 8, 1, 9, 7, 6, 4, 4, 1, 3, 6, 9, 1, 1, 5, 6],
# Between 10 and 500
[128, 142, 351, 128, 436, 451, 28, 416, 204, 194, 429, 33, 55,
122, 305, 466, 293, 386, 203, 201, 194, 288, 184, 15, 486, 39,
419, 311, 190, 101, 164, 79, 16, 206, 176, 74, 189, 12, 77,
182, 296, 280, 169, 282, 415, 108, 407, 11, 268, 135, 356, 326,
312, 294, 225, 406, 172, 331, 196, 266, 80, 406, 388, 205, 401,
421, 224, 106, 45, 247, 200, 201, 451, 205, 179, 279, 172, 30,
216, 236, 56, 323, 206, 14, 383, 211, 106, 24, 60, 210, 36,
83, 348, 276, 397, 415, 32, 58, 15, 224, 379, 248, 166, 450,
161, 74, 306, 412, 471, 108, 169, 157, 75, 59, 14, 295, 390],
# Bits (1s and 0s)
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1,
0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0,
1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
# With negative numbers (between -10 and 10)
[-10, 8, 0, 0, -5, 6, -1, 2, -10, -4, -5, 1, 8,
-7, 2, -2, -5, 5, 5, -5, -6, -1, -1, 5, 9, 1,
1, -10, 9, 5, -7, 7, 8, -1, -5, -5, 3, -7, 8,
-2, 6, -6, 0, -9, -7, -10, -2, 5, -8, -9, 7, -9,
-6, 4, -2, -8, -7, 2, -3, 8, -1, 9, 7, 4, -3,
8, 3, -5, 4, -5, -10, -8, 3, -9, -9, -6, -3, 2,
-6, -10, -6, 8, -4, -10, 4, -8, -6, -3, -8, 6, 2,
-5, 6, 2, 0, -4, -2, -8, -7, 5, 6, 9, 9, 4,
5, 8, 3, -9, -6, 7, -1, 6, -5, 1, -2, -7, 0],
]
##########################################################################
## Statistics Tests
##########################################################################
class StatisticsTests(unittest.TestCase):
"""
Test cases for the statistics helper functions
"""
def test_mean_edge_cases(self):
"""
Test any edge cases that are passed to mean.
"""
self.assertEqual(mean(EMPTY), None)
self.assertEqual(mean(None), None)
def test_mean_integers(self):
"""
Test mean on integers returns correct float
"""
# Computed using numpy
expected = [
4.8260869565217392, 219.92307692307693,
0.47826086956521741, -0.89743589743589747
]
for idx, data in enumerate(INTEGERS):
mu = mean(data)
self.assertIsInstance(mu, float)
self.assertEqual(mu, expected[idx])
def test_mean_floats(self):
"""
Test mean on floats returns correct float
"""
# Computed using numpy
expected = [
0.48705447450000006, 45.260727738500009,
-0.014150190199999982, 99.931501583200003
]
for idx, data in enumerate(FLOATS):
mu = mean(data)
self.assertIsInstance(mu, float)
self.assertAlmostEqual(mu, expected[idx])
def test_median_edge_cases(self):
"""
Test any edge cases that are passed to median.
"""
self.assertEqual(median(EMPTY), None)
self.assertEqual(median(None), None)
def test_median_integers(self):
"""
Test median on integers returns correct float
"""
# Computed using numpy
expected = [5, 204, 0, -1]
for idx, data in enumerate(INTEGERS):
mu = median(data)
self.assertIsInstance(mu, int)
self.assertEqual(mu, expected[idx])
def test_median_floats(self):
"""
Test median on floats returns correct float
"""
# Computed using numpy
expected = [
0.49114555999999998, 41.309186464999996,
-0.12851210999999998, 97.589412865
]
for idx, data in enumerate(FLOATS):
mu = median(data)
self.assertIsInstance(mu, float)
self.assertAlmostEqual(mu, expected[idx])
def test_median_even_integers(self):
"""
Test median on an even lengthed list of integers
"""
cases = [
[5, 6, 9, 6, 7, 2, 5, 5, 5, 3],
[6, 1, 6, 7, 2, 1, 4, 9, 2, 8, 3, 8, 7, 5],
[6, 5, 6, 1, 5, 1, 6, 8, 2, 6, 8, 5, 5, 2, 1, 8, 1, 7]
]
# Computed using numpy
expected = [5.0, 5.5, 5.0]
for case, expect in zip(cases, expected):
mu = median(case)
self.assertIsInstance(mu, float)
self.assertEqual(expect, mu)
##########################################################################
## Online Variance Tests
##########################################################################
class OnlineVarianceTests(unittest.TestCase):
"""
Test cases for the OnlineVariance class
"""
def test_mean_edge_cases(self):
"""
Test any edge cases that are passed to mean.
"""
online = OnlineVariance()
# Test the case of no samples
self.assertEqual(online.mean, 0.0)
self.assertEqual(online.std, 0.0)
self.assertEqual(online.var, 0.0)
# Test the case of one sample
online.update(42)
self.assertEqual(online.mean, 42.0)
self.assertEqual(online.std, 0.0)
self.assertEqual(online.var, 0.0)
def test_online_variance_length(self):
"""
Test that the length of an online variance is the number of samples.
"""
cases = INTEGERS + FLOATS
expected = [115,117,115,117,100,100,100,100]
for data, case in zip(cases, expected):
online = OnlineVariance()
self.assertEqual(len(online), 0)
for item in data:
online.update(item)
self.assertEqual(len(online), case)
def test_online_integers_mean(self):
"""
Test online variance computing means on integers
"""
# Computed using numpy
expected = [
4.8260869565217392, 219.92307692307693,
0.47826086956521741, -0.89743589743589747
]
for data, case in zip(INTEGERS, expected):
online = OnlineVariance()
self.assertEqual(online.mean, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.mean, float)
self.assertEqual(online.mean, case)
def test_online_float_mean(self):
"""
Test online variance computing means on floats.
"""
# Computed using numpy
expected = [
0.48705447450000006, 45.260727738500009,
-0.014150190199999982, 99.931501583200003
]
for data, case in zip(FLOATS, expected):
online = OnlineVariance()
self.assertEqual(online.mean, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.mean, float)
self.assertAlmostEqual(online.mean, case)
@unittest.skip("currently not accurate to enough places")
def test_online_integers_variance(self):
"""
Test online variance computing variance on integers
"""
# Computed using numpy
expected = [
5.9001890359168243, 18264.618014464173,
0.24952741020793956, 35.203155818540431
]
for data, case in zip(INTEGERS, expected):
online = OnlineVariance()
self.assertEqual(online.variance, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.variance, float)
self.assertAlmostEqual(online.variance, case, places=3)
@unittest.skip("currently not accurate to enough places")
def test_online_float_variance(self):
"""
Test online variance computing variance on floats.
"""
# Computed using numpy
expected = [
0.073895851263651294, 766.42173756693592,
1.0187313521468584, 348.99176719359377
]
for data, case in zip(FLOATS, expected):
online = OnlineVariance()
self.assertEqual(online.variance, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.variance, float)
self.assertAlmostEqual(online.variance, case, places=3)
@unittest.skip("currently not accurate to enough places")
def test_online_integers_standard_deviation(self):
"""
Test online variance computing standard deviation on integers
"""
# Computed using numpy
expected = [
2.4290304724142149, 135.1466537301763,
0.49952718665548079, 5.9332247402690239
]
for data, case in zip(INTEGERS, expected):
online = OnlineVariance()
self.assertEqual(online.standard_deviation, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.standard_deviation, float)
self.assertAlmostEqual(online.standard_deviation, case, places=3)
@unittest.skip("currently not accurate to enough places")
def test_online_float_standard_deviation(self):
"""
Test online variance computing standard deviation on floats.
"""
# Computed using numpy
expected = [
0.27183791358758491, 27.684322956629007,
1.0093222241419528, 18.681321344958278
]
for data, case in zip(FLOATS, expected):
online = OnlineVariance()
self.assertEqual(online.standard_deviation, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.standard_deviation, float)
self.assertAlmostEqual(online.standard_deviation, case, places=3)
def test_online_variance_initialization(self):
"""
Be able to initialize online variance with an iterable.
"""
# Computed using numpy
expected = [
(115, 4.8260869565217392),
(117, 219.92307692307693),
(115, 0.47826086956521741),
(117, -0.89743589743589747),
(100, 0.48705447450000006),
(100, 45.260727738500009),
(100, -0.014150190199999982),
(100, 99.931501583200003),
]
for data, (length, mean) in zip(INTEGERS+FLOATS, expected):
online = OnlineVariance(data)
self.assertEqual(len(online),length)
self.assertAlmostEqual(online.mean, mean)
def test_online_variance_addition(self):
"""
Be able to add two online variance objects together for a new mean.
"""
# Computed using numpy
expected = [
# (mean, stddev, variance)
(4.8260869565217392, 2.4290304724142149, 5.9001890359168243),
(113.30172413793103, 144.15193253022923, 20779.779652199759),
(2.652173913043478, 2.7929833769049353, 7.8007561436672956),
(1.9396551724137931, 5.3728063576641221, 28.867048156956006),
(2.8079323137209302, 2.8060961522546988, 7.8741756156986247),
(23.632896622558139, 27.683598847110545, 766.38164512774028),
(2.5748138650232555, 3.0754200874004387, 9.4582087139861208),
(49.061163527069773, 49.150086127405451, 2415.730966331374),
(113.30172413793103, 144.15193253022923, 20779.779652199762),
(219.92307692307693, 135.14665373017627, 18264.618014464169),
(111.14655172413794, 145.77129904765351, 21249.271626040427),
(109.51282051282051, 146.08331631545036, 21340.335305719924),
(118.80048593294931, 147.68865272870397, 21811.93814481972),
(139.43351508686635, 133.34488923389478, 17780.859484799668),
(118.56951604138249, 147.87525029514103, 21867.089649850604),
(164.62742008442396, 116.55887937387467, 13585.972360893467),
(2.652173913043478, 2.7929833769049353, 7.8007561436672956),
(111.14655172413794, 145.77129904765351, 21249.271626040427),
(0.47826086956521741, 0.49952718665548079, 0.24952741020793953),
(-0.21551724137931033, 4.2837021421670043, 18.35010404280618),
(0.48235091837209304, 0.40970422355484531, 0.16785755079867867),
(21.307315227209301, 29.249540614746596, 855.53562617371074),
(0.24923246967441859, 0.8170794298465649, 0.66761879467838758),
(46.735582131720932, 51.216754643725118, 2623.1559562355387),
(1.9396551724137931, 5.3728063576641221, 28.867048156956006),
(109.51282051282051, 146.08331631545036, 21340.335305719924),
(-0.21551724137931033, 4.2837021421670052, 18.350104042806187),
(-0.89743589743589747, 5.9332247402690239, 35.203155818540431),
(-0.25942190115207375, 4.4148407820487128, 19.490819130840489),
(20.37360725276498, 30.025743253384654, 901.54525791817412),
(-0.49039179271889405, 4.4321344921977124, 19.643816156928672),
(45.567512250322572, 52.017556151674398, 2705.8261479925991),
(2.8079323137209302, 2.8060961522546988, 7.8741756156986256),
(118.80048593294931, 147.68865272870397, 21811.93814481972),
(0.48235091837209304, 0.40970422355484531, 0.16785755079867867),
(-0.25942190115207375, 4.4148407820487128, 19.490819130840489),
(0.48705447450000006, 0.27183791358758491, 0.073895851263651294),
(22.8738911065, 29.739170652473767, 884.41827109695691),
(0.23645214215000002, 0.78045828247544069, 0.60911513068451473),
(50.209278028850001, 51.447374536619328, 2646.8323467111868),
(23.632896622558139, 27.683598847110545, 766.38164512774028),
(139.43351508686635, 133.34488923389478, 17780.859484799668),
(21.307315227209301, 29.2495406147466, 855.53562617371097),
(20.373607252764977, 30.025743253384654, 901.54525791817412),
(22.873891106500004, 29.739170652473767, 884.41827109695691),
(45.260727738500009, 27.684322956629007, 766.4217375669358),
(22.623288774150005, 29.936163370148371, 896.17387732421298),
(72.59611466085002, 36.123816666776065, 1304.9301305748484),
(2.5748138650232555, 3.0754200874004387, 9.4582087139861208),
(118.56951604138249, 147.875250295141, 21867.089649850601),
(0.24923246967441862, 0.8170794298465649, 0.66761879467838758),
(-0.49039179271889405, 4.4321344921977124, 19.643816156928676),
(0.23645214215000002, 0.78045828247544069, 0.60911513068451473),
(22.623288774149998, 29.936163370148371, 896.17387732421298),
(-0.014150190199999982, 1.0093222241419528, 1.0187313521468584),
(49.958675696500002, 51.6941831967128, 2672.2885763753043),
(49.061163527069773, 49.150086127405451, 2415.730966331374),
(164.62742008442396, 116.55887937387467, 13585.972360893466),
(46.735582131720932, 51.216754643725118, 2623.1559562355387),
(45.567512250322586, 52.017556151674405, 2705.8261479925995),
(50.209278028849994, 51.447374536619328, 2646.8323467111868),
(72.596114660849992, 36.123816666776065, 1304.9301305748484),
(49.958675696499995, 51.6941831967128, 2672.2885763753043),
(99.931501583200003, 18.681321344958278, 348.99176719359377)
]
for (a, b), (mean, stddev, variance) in zip(product(INTEGERS + FLOATS, repeat=2), expected):
oa = OnlineVariance(a)
ob = OnlineVariance(b)
online = oa + ob
self.assertIsNot(oa, ob)
self.assertIsNot(oa, online)
self.assertIsNot(ob, online)
self.assertAlmostEqual(mean, online.mean)
# Not precise enough for these calculations
# self.assertAlmostEqual(stddev, online.stddev)
# self.assertAlmostEqual(variance, online.variance)
| mit | -3,628,218,771,555,699,700 | 42.163043 | 100 | 0.580123 | false |
fzheng/codejam | lib/python2.7/site-packages/ipyparallel/client/map.py | 1 | 3690 | # encoding: utf-8
"""Classes used in scattering and gathering sequences.
Scattering consists of partitioning a sequence and sending the various
pieces to individual nodes in a cluster.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import division
import sys
from itertools import islice, chain
numpy = None
def is_array(obj):
"""Is an object a numpy array?
Avoids importing numpy until it is requested
"""
global numpy
if 'numpy' not in sys.modules:
return False
if numpy is None:
import numpy
return isinstance(obj, numpy.ndarray)
class Map(object):
"""A class for partitioning a sequence using a map."""
def getPartition(self, seq, p, q, n=None):
"""Returns the pth partition of q partitions of seq.
The length can be specified as `n`,
otherwise it is the value of `len(seq)`
"""
n = len(seq) if n is None else n
# Test for error conditions here
if p<0 or p>=q:
raise ValueError("must have 0 <= p <= q, but have p=%s,q=%s" % (p, q))
remainder = n % q
basesize = n // q
if p < remainder:
low = p * (basesize + 1)
high = low + basesize + 1
else:
low = p * basesize + remainder
high = low + basesize
try:
result = seq[low:high]
except TypeError:
# some objects (iterators) can't be sliced,
# use islice:
result = list(islice(seq, low, high))
return result
def joinPartitions(self, listOfPartitions):
return self.concatenate(listOfPartitions)
def concatenate(self, listOfPartitions):
testObject = listOfPartitions[0]
# First see if we have a known array type
if is_array(testObject):
return numpy.concatenate(listOfPartitions)
# Next try for Python sequence types
if isinstance(testObject, (list, tuple)):
return list(chain.from_iterable(listOfPartitions))
# If we have scalars, just return listOfPartitions
return listOfPartitions
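# Quick sketch of the partition/join round trip (10 items over 3 partitions):
#
#   m = Map()
#   parts = [m.getPartition(list(range(10)), p, 3) for p in range(3)]
#   # parts -> [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
#   m.joinPartitions(parts)  # -> [0, 1, 2, ..., 9]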
class RoundRobinMap(Map):
"""Partitions a sequence in a round robin fashion.
This currently does not work!
"""
def getPartition(self, seq, p, q, n=None):
n = len(seq) if n is None else n
return seq[p:n:q]
def joinPartitions(self, listOfPartitions):
testObject = listOfPartitions[0]
# First see if we have a known array type
if is_array(testObject):
return self.flatten_array(listOfPartitions)
if isinstance(testObject, (list, tuple)):
return self.flatten_list(listOfPartitions)
return listOfPartitions
def flatten_array(self, listOfPartitions):
test = listOfPartitions[0]
shape = list(test.shape)
shape[0] = sum([ p.shape[0] for p in listOfPartitions])
A = numpy.ndarray(shape)
N = shape[0]
q = len(listOfPartitions)
for p,part in enumerate(listOfPartitions):
A[p:N:q] = part
return A
def flatten_list(self, listOfPartitions):
flat = []
for i in range(len(listOfPartitions[0])):
flat.extend([ part[i] for part in listOfPartitions if len(part) > i ])
return flat
def mappable(obj):
"""return whether an object is mappable or not."""
if isinstance(obj, (tuple,list)):
return True
if is_array(obj):
return True
return False
dists = {'b':Map,'r':RoundRobinMap}
| mit | -205,987,033,074,137,200 | 28.758065 | 82 | 0.599187 | false |
petroswork/pydantic | tests/test_main.py | 1 | 10156 | import json
from typing import Any
import pytest
from pydantic import BaseModel, ConfigError, NoneBytes, NoneStr, Required, ValidationError, constr
from pydantic.exceptions import pretty_errors
def test_success():
# same as below but defined here so class definition occurs inside the test
class Model(BaseModel):
a: float
b: int = 10
m = Model(a=10.2)
assert m.a == 10.2
assert m.b == 10
class UltraSimpleModel(BaseModel):
a: float = ...
b: int = 10
def test_ultra_simple_missing():
with pytest.raises(ValidationError) as exc_info:
UltraSimpleModel()
assert """\
error validating input
a:
field required (error_type=Missing)""" == str(exc_info.value)
def test_ultra_simple_failed():
with pytest.raises(ValidationError) as exc_info:
UltraSimpleModel(a='x', b='x')
assert """\
2 errors validating input
a:
could not convert string to float: 'x' (error_type=ValueError track=float)
b:
invalid literal for int() with base 10: 'x' (error_type=ValueError track=int)\
""" == str(exc_info.value)
def test_ultra_simple_repr():
m = UltraSimpleModel(a=10.2)
assert repr(m) == '<UltraSimpleModel a=10.2 b=10>'
assert repr(m.fields['a']) == ("<Field a: type='float', required=True, "
"validators=['float', 'number_size_validator']>")
assert dict(m) == {'a': 10.2, 'b': 10}
def test_str_truncate():
class Model(BaseModel):
s1: str
s2: str
b1: bytes
b2: bytes
m = Model(s1='132', s2='x' * 100, b1='123', b2='x' * 100)
print(repr(m.to_string()))
assert m.to_string() == ("Model s1='132' "
"s2='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx…' "
"b1=b'123' "
"b2=b'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx…")
assert """\
Model
s1='132'
s2='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx…'
b1=b'123'
b2=b'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx…""" == m.to_string(pretty=True)
def test_comparing():
m = UltraSimpleModel(a=10.2, b='100')
assert m == {'a': 10.2, 'b': 100}
assert m == UltraSimpleModel(a=10.2, b=100)
def test_nullable_strings_success():
class NoneCheckModel(BaseModel):
existing_str_value = 'foo'
required_str_value: str = ...
required_str_none_value: NoneStr = ...
existing_bytes_value = b'foo'
required_bytes_value: bytes = ...
required_bytes_none_value: NoneBytes = ...
m = NoneCheckModel(
required_str_value='v1',
required_str_none_value=None,
required_bytes_value='v2',
required_bytes_none_value=None,
)
assert m.required_str_value == 'v1'
assert m.required_str_none_value is None
assert m.required_bytes_value == b'v2'
assert m.required_bytes_none_value is None
def test_nullable_strings_fails():
class NoneCheckModel(BaseModel):
existing_str_value = 'foo'
required_str_value: str = ...
required_str_none_value: NoneStr = ...
existing_bytes_value = b'foo'
required_bytes_value: bytes = ...
required_bytes_none_value: NoneBytes = ...
try:
NoneCheckModel(
required_str_value=None,
required_str_none_value=None,
required_bytes_value=None,
required_bytes_none_value=None,
)
except ValidationError as e:
assert """\
{
"required_bytes_value": {
"error_msg": "None is not an allow value",
"error_type": "TypeError",
"track": "bytes"
},
"required_str_value": {
"error_msg": "None is not an allow value",
"error_type": "TypeError",
"track": "str"
}
}""" == json.dumps(pretty_errors(e.errors_raw), indent=2, sort_keys=True)
class RecursiveModel(BaseModel):
grape: bool = ...
banana: UltraSimpleModel = ...
def test_recursion():
m = RecursiveModel(grape=1, banana={'a': 1})
assert m.grape is True
assert m.banana.a == 1.0
assert m.banana.b == 10
assert repr(m) == '<RecursiveModel grape=True banana=<UltraSimpleModel a=1.0 b=10>>'
def test_recursion_fails():
with pytest.raises(ValidationError):
RecursiveModel(grape=1, banana=123)
class PreventExtraModel(BaseModel):
foo = 'whatever'
class Config:
ignore_extra = False
def test_prevent_extra_success():
m = PreventExtraModel()
assert m.foo == 'whatever'
m = PreventExtraModel(foo=1)
assert m.foo == '1'
def test_prevent_extra_fails():
with pytest.raises(ValidationError) as exc_info:
PreventExtraModel(foo='ok', bar='wrong', spam='xx')
assert exc_info.value.message == '2 errors validating input'
assert """\
bar:
extra fields not permitted (error_type=Extra)
spam:
extra fields not permitted (error_type=Extra)""" == exc_info.value.display_errors
class InvalidValidator:
@classmethod
def get_validators(cls):
yield cls.has_wrong_arguments
@classmethod
def has_wrong_arguments(cls, value, bar):
pass
def test_invalid_validator():
with pytest.raises(ConfigError) as exc_info:
class InvalidValidatorModel(BaseModel):
x: InvalidValidator = ...
assert exc_info.value.args[0].startswith('Invalid signature for validator')
def test_no_validator():
with pytest.raises(ConfigError) as exc_info:
class NoValidatorModel(BaseModel):
x: object = ...
assert exc_info.value.args[0] == "no validator found for <class 'object'>"
def test_unable_to_infer():
with pytest.raises(ConfigError) as exc_info:
class InvalidDefinitionModel(BaseModel):
x = None
assert exc_info.value.args[0] == 'unable to infer type for attribute "x"'
def test_not_required():
class Model(BaseModel):
a: float = None
assert Model(a=12.2).a == 12.2
assert Model().a is None
assert Model(a=None).a is None
def test_infer_type():
class Model(BaseModel):
a = False
b = ''
c = 0
assert Model().a is False
assert Model().b == ''
assert Model().c == 0
def test_allow_extra():
class Model(BaseModel):
a: float = ...
class Config:
allow_extra = True
assert Model(a='10.2', b=12).dict() == {'a': 10.2, 'b': 12}
def test_set_attr():
m = UltraSimpleModel(a=10.2)
assert m.dict() == {'a': 10.2, 'b': 10}
m.b = 20
assert m.dict() == {'a': 10.2, 'b': 20}
def test_values_depreciated():
m = UltraSimpleModel(a=10.2)
assert m.dict() == {'a': 10.2, 'b': 10}
with pytest.warns(DeprecationWarning):
assert m.values() == {'a': 10.2, 'b': 10}
def test_set_attr_invalid():
class UltraSimpleModel(BaseModel):
a: float = ...
b: int = 10
m = UltraSimpleModel(a=10.2)
assert m.dict() == {'a': 10.2, 'b': 10}
with pytest.raises(ValueError) as exc_info:
m.c = 20
assert '"UltraSimpleModel" object has no field "c"' in str(exc_info)
def test_any():
class AnyModel(BaseModel):
a: Any = 10
assert AnyModel().a == 10
assert AnyModel(a='foobar').a == 'foobar'
def test_alias():
class Model(BaseModel):
a = 'foobar'
class Config:
fields = {
'a': {'alias': '_a'}
}
assert Model().a == 'foobar'
assert Model().dict() == {'a': 'foobar'}
assert Model(_a='different').a == 'different'
assert Model(_a='different').dict() == {'a': 'different'}
def test_field_order():
class Model(BaseModel):
c: float
b: int = 10
a: str
d: dict = {}
# fields are ordered as defined except annotation-only fields come last
assert list(Model.__fields__.keys()) == ['c', 'a', 'b', 'd']
def test_required():
# same as below but defined here so class definition occurs inside the test
class Model(BaseModel):
a: float = Required
b: int = 10
m = Model(a=10.2)
assert m.dict() == dict(a=10.2, b=10)
with pytest.raises(ValidationError) as exc_info:
Model()
assert """\
error validating input
a:
field required (error_type=Missing)\
""" == str(exc_info.value)
def test_not_immutability():
class TestModel(BaseModel):
a: int = 10
class Config:
allow_mutation = True
allow_extra = False
m = TestModel()
assert m.a == 10
m.a = 11
assert m.a == 11
with pytest.raises(ValueError) as exc_info:
m.b = 11
assert '"TestModel" object has no field "b"' in str(exc_info)
def test_immutability():
class TestModel(BaseModel):
a: int = 10
class Config:
allow_mutation = False
allow_extra = False
m = TestModel()
assert m.a == 10
with pytest.raises(TypeError) as exc_info:
m.a = 11
assert '"TestModel" is immutable and does not support item assignment' in str(exc_info)
with pytest.raises(ValueError) as exc_info:
m.b = 11
assert '"TestModel" object has no field "b"' in str(exc_info)
class ValidateAssignmentModel(BaseModel):
a: int = 2
b: constr(min_length=1)
class Config:
validate_assignment = True
def test_validating_assignment_pass():
p = ValidateAssignmentModel(a=5, b='hello')
p.a = 2
assert p.a == 2
assert p.dict() == {'a': 2, 'b': 'hello'}
p.b = 'hi'
assert p.b == 'hi'
assert p.dict() == {'a': 2, 'b': 'hi'}
def test_validating_assignment_fail():
p = ValidateAssignmentModel(a=5, b='hello')
with pytest.raises(ValidationError) as exc_info:
p.a = 'b'
assert """error validating input
a:
invalid literal for int() with base 10: 'b' (error_type=ValueError track=int)""" == str(exc_info.value)
with pytest.raises(ValidationError) as exc_info:
p.b = ''
assert """error validating input
b:
length less than minimum allowed: 1 (error_type=ValueError track=ConstrainedStrValue)""" == str(exc_info.value)
| mit | 5,959,447,907,986,627,000 | 25.846561 | 116 | 0.609283 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/hplip/base/g.py | 1 | 11695 | # -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
# NOTE: This module is safe for 'from g import *'
#
# Std Lib
import sys
import os
import os.path
import ConfigParser
import locale
import pwd
import stat
import re
# Local
from codes import *
import logger
# System wide logger
log = logger.Logger('', logger.Logger.LOG_LEVEL_INFO, logger.Logger.LOG_TO_CONSOLE)
log.set_level('info')
MINIMUM_PYQT_MAJOR_VER = 3
MINIMUM_PYQT_MINOR_VER = 14
MINIMUM_QT_MAJOR_VER = 3
MINIMUM_QT_MINOR_VER = 0
def to_bool(s, default=False):
if isinstance(s, str) and s:
if s[0].lower() in ['1', 't', 'y']:
return True
elif s[0].lower() in ['0', 'f', 'n']:
return False
elif isinstance(s, bool):
return s
return default
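# Behaviour sketch: only the first character of a string is inspected and
# anything unrecognised falls back to the default, e.g.
#   to_bool('yes') -> True, to_bool('0') -> False, to_bool('maybe', True) -> True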
# System wide properties
class Properties(dict):
def __getattr__(self, attr):
if attr in self.keys():
return self.__getitem__(attr)
else:
return ""
def __setattr__(self, attr, val):
self.__setitem__(attr, val)
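# Properties gives attribute-style access to dict entries; unknown attributes
# read back as an empty string, e.g.
#   p = Properties(); p.version = '3.9.2'  # p.version -> '3.9.2', p.missing -> ''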
prop = Properties()
class ConfigBase(object):
def __init__(self, filename):
self.filename = filename
self.conf = ConfigParser.ConfigParser()
self.read()
def get(self, section, key, default=u''):
try:
return self.conf.get(section, key)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
return default
def set(self, section, key, value):
if not self.conf.has_section(section):
self.conf.add_section(section)
self.conf.set(section, key, value)
self.write()
def sections(self):
return self.conf.sections()
def has_section(self, section):
return self.conf.has_section(section)
def options(self, section):
return self.conf.options(section)
keys = options
def read(self):
if self.filename is not None:
filename = self.filename
if filename.startswith("/root/"):
# Don't try opening a file in root's home directory.
log.error("attempted to read from '%s'" % self.filename)
return
try:
fp = open(self.filename, "r")
self.conf.readfp(fp)
fp.close()
except (OSError, IOError):
log.debug("Unable to open file %s for reading." % self.filename)
def write(self):
if self.filename is not None:
filename = self.filename
if filename.startswith("/root/") or filename.startswith("/etc/"):
# Don't try writing a file in root's home directory or
# the system-wide config file.
# See bug #479178.
log.error("attempted to write to '%s'" % self.filename)
return
try:
fp = open(self.filename, "w")
self.conf.write(fp)
fp.close()
except (OSError, IOError):
log.debug("Unable to open file %s for writing." % self.filename)
class SysConfig(ConfigBase):
def __init__(self):
ConfigBase.__init__(self, '/etc/hp/hplip.conf')
class State(ConfigBase):
def __init__(self):
ConfigBase.__init__(self, '/var/lib/hp/hplip.state')
class UserConfig(ConfigBase):
def __init__(self):
if not os.geteuid() == 0:
prop.user_dir = os.path.expanduser('~/.hplip')
try:
if not os.path.exists(prop.user_dir):
os.makedirs(prop.user_dir)
except OSError:
pass # This is sometimes OK, if running hpfax: for example
prop.user_config_file = os.path.join(prop.user_dir, 'hplip.conf')
if not os.path.exists(prop.user_config_file):
try:
file(prop.user_config_file, 'w').close()
s = os.stat(os.path.dirname(prop.user_config_file))
os.chown(prop.user_config_file, s[stat.ST_UID], s[stat.ST_GID])
except IOError:
pass
ConfigBase.__init__(self, prop.user_config_file)
else:
# If running as root, conf file is None
prop.user_dir = None
prop.user_config_file = None
ConfigBase.__init__(self, None)
def workingDirectory(self):
t = self.get('last_used', 'working_dir', os.path.expanduser("~"))
try:
t = t.decode('utf-8')
except UnicodeError:
log.error("Invalid unicode: %s" % t)
log.debug("working directory: %s" % t)
return t
def setWorkingDirectory(self, t):
self.set('last_used', 'working_dir', t.encode('utf-8'))
log.debug("working directory: %s" % t.encode('utf-8'))
os.umask(0037)
# System Config File: Directories and build settings. Not altered after installation.
sys_conf = SysConfig()
# System State File: System-wide runtime settings
sys_state = State()
# Per-user Settings File: (Note: For Qt4 code, limit the use of this to non-GUI apps. only)
user_conf = UserConfig()
# Language settings
try:
prop.locale, prop.encoding = locale.getdefaultlocale()
except ValueError:
prop.locale = 'en_US'
prop.encoding = 'UTF8'
prop.version = sys_conf.get('hplip', 'version', '0.0.0') # e.g., 3.9.2b.10
_p, _x = re.compile(r'(\d\w*)', re.I), []
for _y in prop.version.split('.')[:3]:
_z = _p.match(_y)
if _z is not None:
_x.append(_z.group(1))
prop.installed_version = '.'.join(_x) # e.g., '3.9.2'
try:
prop.installed_version_int = int(''.join(['%02x' % int(_y) for _y in _x]), 16) # e.g., 0x030902 -> 198914
except ValueError:
prop.installed_version_int = 0
prop.home_dir = sys_conf.get('dirs', 'home', os.path.realpath(os.path.normpath(os.getcwd())))
prop.username = pwd.getpwuid(os.getuid())[0]
pdb = pwd.getpwnam(prop.username)
prop.userhome = pdb[5]
prop.history_size = 50
prop.data_dir = os.path.join(prop.home_dir, 'data')
prop.image_dir = os.path.join(prop.home_dir, 'data', 'images')
prop.xml_dir = os.path.join(prop.home_dir, 'data', 'xml')
prop.models_dir = os.path.join(prop.home_dir, 'data', 'models')
prop.localization_dir = os.path.join(prop.home_dir, 'data', 'localization')
prop.max_message_len = 8192
prop.max_message_read = 65536
prop.read_timeout = 90
prop.ppd_search_path = '/usr/share;/usr/local/share;/usr/lib;/usr/local/lib;/usr/libexec;/opt;/usr/lib64'
prop.ppd_search_pattern = 'HP-*.ppd.*'
prop.ppd_download_url = 'http://www.linuxprinting.org/ppd-o-matic.cgi'
prop.ppd_file_suffix = '-hpijs.ppd'
# Build and install configurations
prop.gui_build = to_bool(sys_conf.get('configure', 'gui-build', '0'))
prop.net_build = to_bool(sys_conf.get('configure', 'network-build', '0'))
prop.par_build = to_bool(sys_conf.get('configure', 'pp-build', '0'))
prop.usb_build = True
prop.scan_build = to_bool(sys_conf.get('configure', 'scanner-build', '0'))
prop.fax_build = to_bool(sys_conf.get('configure', 'fax-build', '0'))
prop.doc_build = to_bool(sys_conf.get('configure', 'doc-build', '0'))
prop.foomatic_xml_install = to_bool(sys_conf.get('configure', 'foomatic-xml-install', '0'))
prop.foomatic_ppd_install = to_bool(sys_conf.get('configure', 'foomatic-ppd-install', '0'))
prop.hpcups_build = to_bool(sys_conf.get('configure', 'hpcups-install', '0'))
prop.hpijs_build = to_bool(sys_conf.get('configure', 'hpijs-install', '0'))
# Spinner, ala Gentoo Portage
spinner = "\|/-\|/-"
spinpos = 0
def update_spinner():
global spinner, spinpos
if not log.is_debug() and sys.stdout.isatty():
sys.stdout.write("\b" + spinner[spinpos])
spinpos=(spinpos + 1) % 8
sys.stdout.flush()
def cleanup_spinner():
if not log.is_debug() and sys.stdout.isatty():
sys.stdout.write("\b \b")
sys.stdout.flush()
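# Typical pattern (sketch): call update_spinner() once per iteration of a
# long-running loop and cleanup_spinner() when the work is done; output is
# only produced when attached to a TTY and not in debug logging mode.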
# Internal/messaging errors
ERROR_STRINGS = {
ERROR_SUCCESS : 'No error',
ERROR_UNKNOWN_ERROR : 'Unknown error',
ERROR_DEVICE_NOT_FOUND : 'Device not found',
ERROR_INVALID_DEVICE_ID : 'Unknown/invalid device-id field',
ERROR_INVALID_DEVICE_URI : 'Unknown/invalid device-uri field',
ERROR_DATA_LENGTH_EXCEEDS_MAX : 'Data length exceeds maximum',
ERROR_DEVICE_IO_ERROR : 'Device I/O error',
ERROR_NO_PROBED_DEVICES_FOUND : 'No probed devices found',
ERROR_DEVICE_BUSY : 'Device busy',
ERROR_DEVICE_STATUS_NOT_AVAILABLE : 'DeviceStatus not available',
ERROR_INVALID_SERVICE_NAME : 'Invalid service name',
ERROR_ERROR_INVALID_CHANNEL_ID : 'Invalid channel-id (service name)',
ERROR_CHANNEL_BUSY : 'Channel busy',
ERROR_DEVICE_DOES_NOT_SUPPORT_OPERATION : 'Device does not support operation',
ERROR_DEVICEOPEN_FAILED : 'Device open failed',
ERROR_INVALID_DEVNODE : 'Invalid device node',
ERROR_INVALID_HOSTNAME : "Invalid hostname ip address",
ERROR_INVALID_PORT_NUMBER : "Invalid JetDirect port number",
ERROR_NO_CUPS_QUEUE_FOUND_FOR_DEVICE : "No CUPS queue found for device.",
ERROR_DATFILE_ERROR: "DAT file error",
ERROR_INVALID_TIMEOUT: "Invalid timeout",
ERROR_IO_TIMEOUT: "I/O timeout",
ERROR_FAX_INCOMPATIBLE_OPTIONS: "Incompatible fax options",
ERROR_FAX_INVALID_FAX_FILE: "Invalid fax file",
ERROR_FAX_FILE_NOT_FOUND: "Fax file not found",
ERROR_INTERNAL : 'Unknown internal error',
}
class Error(Exception):
def __init__(self, opt=ERROR_INTERNAL):
self.opt = opt
self.msg = ERROR_STRINGS.get(opt, ERROR_STRINGS[ERROR_INTERNAL])
log.debug("Exception: %d (%s)" % (opt, self.msg))
Exception.__init__(self, self.msg, opt)
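# Illustrative usage of the Error class above (not part of the original file):
#
#     try:
#         raise Error(ERROR_DEVICE_NOT_FOUND)
#     except Error, e:                      # Python 2 style, as used in this module
#         log.error("operation failed: %s (code=%d)" % (e.msg, e.opt))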
# Make sure True and False are avail. in pre-2.2 versions
try:
True
except NameError:
True = (1==1)
False = not True
# as new translations are completed, add them here
supported_locales = { 'en_US': ('us', 'en', 'en_us', 'american', 'america', 'usa', 'english'),}
# Localization support was disabled in 3.9.2
#'zh_CN': ('zh', 'cn', 'zh_cn' , 'china', 'chinese', 'prc'),
#'de_DE': ('de', 'de_de', 'german', 'deutsche'),
#'fr_FR': ('fr', 'fr_fr', 'france', 'french', 'français'),
#'it_IT': ('it', 'it_it', 'italy', 'italian', 'italiano'),
#'ru_RU': ('ru', 'ru_ru', 'russian'),
#'pt_BR': ('pt', 'br', 'pt_br', 'brazil', 'brazilian', 'portuguese', 'brasil', 'portuguesa'),
#'es_MX': ('es', 'mx', 'es_mx', 'mexico', 'spain', 'spanish', 'espanol', 'español'),
#}
| gpl-3.0 | 3,392,872,497,385,884,000 | 32.991279 | 116 | 0.593175 | false |
ctlewitt/Invisible-Keyboard | analyze_ngram_stats.py | 1 | 3628 | import re
import string
actual_letter_frequency = {"a":"11.602%", "b":"4.702%", "c":"3.511%", "d":"2.670%", "e":"2.007%", "f":"3.779%", "g":"1.950%", "h":"7.232%", "i":"6.286%", "j":".597%", "k":".590%", "l":"2.705%", "m":"4.374%", "n":"2.365%", "o":"6.264%", "p":"2.545%", "q":".173%", "r":"1.653%", "s":"7.755%", "t":"16.671%", "u":"1.487%", "v":".649%", "w":"6.753%", "x":".017%", "y":"1.620%", "z":".034%"}
#data from: http://en.wikipedia.org/wiki/Letter_frequency#Relative_frequencies_of_the_first_letters_of_a_word_in_the_English_language
LETTER = 0
COUNT = 1
#letter_count is a list of lists [letter, count]
def check_each_word(all_letters_file_write, one_letter_file_read):
#get number of lines/words
#get min frequency
#get max frequency
#get total number count
#get average count per word
num_words = 0
min_frequency = 100000
max_frequency = 0
total_frequency = 0
with open(one_letter_file_read) as f_read:
for line in f_read:
#get word frequency
###CHECK THIS REGULAR EXPRESSION###
word_frequency_info = re.match('.*\t([0-9]*)\n', line)
word_frequency_words = word_frequency_info.groups()
word_frequency = int(word_frequency_words[0])
#set stats
num_words += 1
total_frequency += word_frequency
if min_frequency > word_frequency:
min_frequency = word_frequency
if max_frequency < word_frequency:
max_frequency = word_frequency
average_frequency = total_frequency / num_words
#print results
all_letters_file_write.write("num_words: " + str(num_words) + "\n")
all_letters_file_write.write("min_frequency: " + str(min_frequency) + "\n")
all_letters_file_write.write("max_frequency: " + str(max_frequency) + "\n")
all_letters_file_write.write("total_frequency: " + str(total_frequency) + "\n")
all_letters_file_write.write("average_frequency: " + str(average_frequency) + "\n")
return num_words, total_frequency
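# Illustrative input for check_each_word() (an assumption inferred from the regex
# above, not part of the original file): each line of a one-gram file is expected
# to end with a tab-separated count, e.g. "the\t1061396\n", so that
#
#     re.match('.*\t([0-9]*)\n', 'the\t1061396\n').groups()   # -> ('1061396',)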
def get_file_name(letter):
return "combined_one_gram_55Kthresh_" + letter + ".txt"
letter_count = []
total_words = 0
sum_total_frequency = 0
#go through each 1-gram letter's file and collect stats on the word counts for each one
with open("combined_one_gram_55Kthresh_stats.txt", "w") as n_gram_stats:
n_gram_stats.write("Stats on words starting with each letter\n")
for letter in string.lowercase:
n_gram_stats.write(letter + ":\n")
num_words, frequency = check_each_word(n_gram_stats, get_file_name(letter)) #checks new_one_gram_[letter].txt data
n_gram_stats.write("\n")
letter_count.append([letter, num_words])
total_words += num_words
sum_total_frequency += frequency
total_words += 0.0 #turn into double for future calculations. sort of a hack.... :(
#record percent stats to each letter's 1-gram data. (ie, What percentage of the total words begin with this letter?)
n_gram_stats.write("\n\n\n")
n_gram_stats.write("AGGREGATE RESULTS:\n")
for letter_stat in letter_count:
n_gram_stats.write(letter_stat[LETTER] + ":\n")
n_gram_stats.write(" count: " + str(letter_stat[COUNT]) + "\n")
n_gram_stats.write(" prcnt: " + str(letter_stat[COUNT]*100/total_words) + "%\n")
n_gram_stats.write(" want: " + str(actual_letter_frequency[letter_stat[LETTER]]) + "\n")
n_gram_stats.write("\n\n")
n_gram_stats.write("Total Word Count: " + str(total_words) + "\n")
n_gram_stats.write("Average Frequency: " + str(sum_total_frequency/total_words) + "\n") | mit | -3,000,366,039,412,939,000 | 44.3625 | 386 | 0.615766 | false |
Affirm/cabot | cabot/cabotapp/tasks.py | 1 | 11043 | import os
from datetime import timedelta
import logging
from celery import Celery
from celery._state import set_default_app
from celery.task import task
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from cabot.cabotapp.models import Schedule, StatusCheckResultTag, StatusCheckResult, Acknowledgement, StatusCheck
from cabot.cabotapp.schedule_validation import update_schedule_problems
from cabot.cabotapp.utils import build_absolute_url
from cabot.celery.celery_queue_config import STATUS_CHECK_TO_QUEUE
from django.conf import settings
from django.utils import timezone
from cabot.cabotapp import models
from cabot.metricsapp.defs import SCHEDULE_PROBLEMS_EMAIL_SNOOZE_HOURS
from cabot.metricsapp.models import MetricsStatusCheckBase
celery = Celery(__name__)
celery.config_from_object(settings)
# Celery should set this app as the default, however the 'celery.current_app'
# api uses threadlocals, so code running in different threads/greenlets uses
# the fallback default instead of this app when no app is specified. This
# causes confusing connection errors when celery tries to connect to a
# non-existent rabbitmq server. It seems to happen mostly when using the
# 'celery.canvas' api. To get around this, we use the internal 'celery._state'
# api to force our app to be the default.
set_default_app(celery)
logger = logging.getLogger(__name__)
def _classify_status_check(pk):
"""
Maps the check to either normal or high priority based on the dict
cabot.celery.celery_queue_config.STATUS_CHECK_TO_QUEUE
"""
check = models.StatusCheck.objects.get(pk=pk)
# If the status check we are running is an instance of MetricsStatusCheckBase
# (i.e. Grafana/Elasticsearch), then StatusCheck.importance is determined by
# the type of failure: If the 'high_alert_value' is set and the check fails,
# the importance is set to ERROR or CRITICAL. However, if this value is null
# or does not fail, and 'warning_value' is not null and fails instead, then
# the importance is set to WARNING. As such, we run all importance levels of
# MetricsStatusCheckBase based on their maximum importance.
if not isinstance(check, MetricsStatusCheckBase):
check_queue = STATUS_CHECK_TO_QUEUE[check.check_category][check.importance]
else:
if check.high_alert_value is None:
check_queue = STATUS_CHECK_TO_QUEUE[check.check_category][models.CheckGroupMixin.WARNING_STATUS]
else:
check_queue = STATUS_CHECK_TO_QUEUE[check.check_category][check.high_alert_importance]
return check_queue
@task(ignore_result=True)
def run_status_check(pk):
check = models.StatusCheck.objects.get(pk=pk)
check.run()
@task(ignore_result=True)
def run_all_checks():
checks = models.StatusCheck.objects.filter(active=True).all()
for check in checks:
if check.should_run():
check_queue = _classify_status_check(check.pk)
run_status_check.apply_async((check.pk,), queue=check_queue, routing_key=check_queue)
@task(ignore_result=True)
def update_services(ignore_result=True):
    # Avoid ImportError and the like from legacy scheduling
return
@task(ignore_result=True)
def update_service(service_or_id):
if not isinstance(service_or_id, models.Service):
service = models.Service.objects.get(id=service_or_id)
else:
service = service_or_id
service.update_status()
@task(ignore_result=True)
def update_all_services():
services = models.Service.objects.filter(alerts_enabled=True)
for service in services:
update_service.apply_async((service.id,))
@task(ignore_result=True)
def update_shifts_and_problems():
schedules = models.Schedule.objects.all()
for schedule in schedules:
update_shift_and_problems.apply_async((schedule.id,))
@task(ignore_result=True)
def update_shift_and_problems(schedule_id):
schedule = models.Schedule.objects.get(id=schedule_id)
try:
models.update_shifts(schedule)
except Exception:
logger.exception('Error when updating shifts for schedule %s.', schedule.name)
try:
update_schedule_problems(schedule) # must happen after update_shifts()
except Exception:
logger.exception('Error when updating schedule problems for schedule %s.', schedule.name)
# if there are any problems, queue an email to go out
if schedule.has_problems() and not schedule.problems.is_silenced():
send_schedule_problems_email.apply_async((schedule.pk,))
@task(ignore_result=True)
def reset_shifts_and_problems(schedule_id):
"""
Update shifts & problems for a schedule, called by the Schedule post_save signal handler.
Does not send schedule problems warning emails.
"""
try:
schedule = models.Schedule.objects.get(id=schedule_id)
except Schedule.DoesNotExist:
# this can happen if the schedule got deleted after this task was scheduled but before it started to run
return
try:
models.update_shifts(schedule)
except Exception:
logger.exception('Error when resetting shifts for schedule %s.', schedule.name)
try:
update_schedule_problems(schedule)
except Exception:
logger.exception('Error updating schedule problems list for schedule %s.', schedule.name)
@task(ignore_result=True)
def clean_db(days_to_retain=60):
"""
    Clean up the database, which otherwise gets overwhelmed with StatusCheckResults.
    To loop over undeleted results, spawn new tasks to make sure the
    db connection is closed, etc.
"""
to_discard_results = models.StatusCheckResult.objects.filter(
time__lte=timezone.now()-timedelta(days=days_to_retain))
to_discard_snapshots = models.ServiceStatusSnapshot.objects.filter(
time__lte=timezone.now()-timedelta(days=days_to_retain))
to_discard_acks = models.Acknowledgement.objects.filter(
closed_at__lte=timezone.now()-timedelta(days=days_to_retain))
result_ids = to_discard_results.values_list('id', flat=True)[:100]
snapshot_ids = to_discard_snapshots.values_list('id', flat=True)[:100]
ack_ids = to_discard_acks.values_list('id', flat=True)[:100]
if not result_ids:
logger.info('Completed deleting StatusCheckResult objects')
if not snapshot_ids:
logger.info('Completed deleting ServiceStatusSnapshot objects')
if not ack_ids:
logger.info('Completed deleting Acknowledgement objects')
if (not snapshot_ids) and (not result_ids) and (not ack_ids):
return
logger.info('Processing %s StatusCheckResult objects' % len(result_ids))
logger.info('Processing %s ServiceStatusSnapshot objects' %
len(snapshot_ids))
models.StatusCheckResult.objects.filter(id__in=result_ids).delete()
models.ServiceStatusSnapshot.objects.filter(id__in=snapshot_ids).delete()
models.Acknowledgement.objects.filter(id__in=ack_ids).delete()
clean_db.apply_async(kwargs={'days_to_retain': days_to_retain},
countdown=3)
# because django 1.6 doesn't have send_mail(html_message=...) :|
def _send_mail_html(subject, message, from_email, recipient_list):
msg = EmailMessage(subject, message, from_email, recipient_list)
msg.content_subtype = 'html' # main content type is html
msg.send()
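# Illustrative usage of _send_mail_html (not part of the original file; the
# addresses below are placeholders):
#
#     _send_mail_html(subject="Cabot schedule warning",
#                     message="<b>Example</b> body",
#                     from_email="Cabot Updates<cabot@example.com>",
#                     recipient_list=["oncall@example.com"])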
@task(ignore_result=True)
def send_schedule_problems_email(schedule_id):
"""
Send off an email as a celery task
:param schedule_id schedule ID
"""
try:
schedule = models.Schedule.objects.get(pk=schedule_id)
problems = schedule.problems
    except (models.Schedule.DoesNotExist, models.ScheduleProblems.DoesNotExist):
# if the schedule or problems got deleted, nothing to do
logger.info("schedule or problems for pk {} no longer exist, not sending email".format(schedule_id))
return
# check if problems became silenced since the email got queued
if problems.is_silenced():
logger.info("schedule problems became silenced, not sending email")
return
# build the message
# make the schedule link absolute (add domain name) because this is going into an email
cabot_schedule_url = build_absolute_url(schedule.get_edit_url())
# build links to snooze email alerts
snooze_hours = SCHEDULE_PROBLEMS_EMAIL_SNOOZE_HOURS
snooze_links = [build_absolute_url(reverse('snooze-schedule-warnings',
kwargs={'pk': schedule.pk, 'hours': hours})) for hours in snooze_hours]
snoozes = ['<a href="{}">{} hours</a>'.format(link, hours) for link, hours in zip(snooze_links, snooze_hours)]
message = 'The schedule <a href="{}">{}</a> has some issues:\n\n{}\n\n' \
'Click <a href="{}">here</a> to review the schedule\'s configuration.\n' \
'If you don\'t want to deal with this right now, you can silence these alerts for {}.' \
.format(cabot_schedule_url, schedule.name, problems.text, cabot_schedule_url, ' | '.join(snoozes))
message = message.replace('\n', '\n<br/>') # html ignores newlines
# figure out who to send it to (on-call + fallback)
recipients = models.get_duty_officers(schedule) + models.get_fallback_officers(schedule)
recipients = list(set([r.email for r in recipients if r.email])) # get unique emails
# for extra visibility, also log a warning
logger.warn("Sending schedule problems email to {}:\n\n{}".format(recipients, message))
if len(recipients) > 0:
try:
_send_mail_html(subject="Cabot Schedule '{}' Has Problems".format(schedule.name),
message=message,
from_email='Cabot Updates<{}>'.format(os.environ.get('CABOT_FROM_EMAIL')),
recipient_list=recipients)
except Exception as e:
logger.exception('Error sending schedule problems email: {}'.format(e))
@task(ignore_result=True)
def clean_orphaned_tags():
ack_tags = Acknowledgement.tags.through.objects.values('statuscheckresulttag')
result_tags = StatusCheckResult.tags.through.objects.values('statuscheckresulttag')
orphaned_tags = StatusCheckResultTag.objects.exclude(pk__in=ack_tags).exclude(pk__in=result_tags)
logger.info("Deleting {} orphaned tags (out of {} total tags)..."
.format(orphaned_tags.count(), StatusCheckResultTag.objects.count()))
orphaned_tags.delete()
@task(ignore_result=True)
def close_expired_acknowledgements():
now = timezone.now()
# loop over open acks where expire_at >= now
for ack in Acknowledgement.objects.filter(closed_at__isnull=True, expire_at__lte=now):
ack.close('expired')
update_check_and_services.apply_async((ack.status_check_id,))
@task(ignore_result=True)
def update_check_and_services(check_id):
# type: (int) -> None
check = StatusCheck.objects.get(id=check_id)
check.run()
for service in check.service_set.all():
service.update_status()
| mit | -190,869,898,702,287,970 | 39.01087 | 118 | 0.700625 | false |
lalstef/QuickDatesFormatter | quick_dates_formatter.py | 1 | 3710 | import sublime, sublime_plugin
from datetime import datetime
# Date to be shown as example in the formats list
EXAMPLE_DATE = datetime(1970, 12, 31)
class QuickdatesformatterFormatDatesCommand(sublime_plugin.WindowCommand):
# Needed to find the dates in the chosen format within the text
date_to_regex = {
'%d/%m/%Y': r'\d{1,2}/\d{1,2}/\d{4}',
'%m/%d/%Y': r'\d{1,2}/\d{1,2}/\d{4}',
'%Y/%m/%d': r'\d{4}/\d{1,2}/\d{1,2}',
'%d-%m-%Y': r'\d{1,2}-\d{1,2}-\d{4}',
'%m-%d-%Y': r'\d{1,2}-\d{1,2}-\d{4}',
'%Y-%m-%d': r'\d{4}-\d{1,2}-\d{1,2}',
'%d.%m.%Y': r'\d{1,2}\.\d{1,2}\.\d{4}',
'%m.%d.%Y': r'\d{1,2}\.\d{1,2}\.\d{4}',
'%Y.%m.%d': r'\d{4}\.\d{1,2}\.\d{1,2}'
}
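    # Illustrative example (not part of the original plugin): the '%d/%m/%Y' entry
    # above pairs the search pattern r'\d{1,2}/\d{1,2}/\d{4}' with the strptime
    # format used for validation, e.g.
    #
    #     datetime.strptime('31/12/1970', '%d/%m/%Y')   # -> datetime(1970, 12, 31)
    #     datetime.strptime('31-12-1970', '%d/%m/%Y')   # raises ValueError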
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.settings = None
self.formats = None
self.target_format = None
self.format = None
self.dates = [] # dates in current selection as datetime objects
self.formatted_dates = [] # date strings formatted in the target format
def format_highlighted(self, index):
view = self.window.active_view()
view.sel().clear()
# If quick panel cancelled, clear current state and return.
# ( index of -1 means that the quick panel was cancelled )
if index == -1:
self.dates = []
self.formatted_dates = []
return
# Get the format and the corresponding regex
self.format = self.formats[index][0]
pattern = self.date_to_regex[self.format]
# Add all found dates to the current selection
for region in view.find_all(pattern):
contents = view.substr(view.word(region))
try:
# To check if the regex match really fits the chosen format, we try parsing the string
# Then just add it to the list of dates, not to parse it again later
date_obj = datetime.strptime(contents, self.format)
self.dates.append(date_obj)
# If the string fits the chosen format, then add the region
view.sel().add(region)
except ValueError:
# Nothing to handle here, the string is not in the right format
pass
    def format_selected(self, index):
        # When a format is selected, prompt the user for the desired target format.
        # An index of -1 means the quick panel was cancelled, so do nothing.
        if index == -1:
            return
        self.window.show_input_panel(
"Target format",
self.settings.get('target_format'),
self.target_format_selected,
self.target_format_change,
self.target_format_cancelled,
)
def target_format_cancelled(self):
# Clear current selection and formatted_dates list
self.window.active_view().sel().clear()
self.dates = []
self.formatted_dates = []
def target_format_change(self, fmt):
pass
def target_format_selected(self, fmt):
"""
Replace selected dates with dates formatted in target format as soon as the target format is input
"""
# Run replace_dates TextCommand
view = self.window.active_view()
        view.run_command('quickdatesformatter_replace_dates', {'formatted_dates':
            [datetime.strftime(date_obj, fmt) for date_obj in self.dates]})
def run(self, *args, **kwargs):
self.settings = sublime.load_settings('QuickDatesFormatter.sublime-settings')
self.formats = self.settings.get('formats')
self.target_format = self.settings.get('target_format')
self.window.show_quick_panel(
[[label, datetime.strftime(EXAMPLE_DATE, fmt)] for fmt, label in self.formats],
self.format_selected,
sublime.MONOSPACE_FONT,
0, # menu item index which is highlighted by default
self.format_highlighted
)
class QuickdatesformatterReplaceDatesCommand(sublime_plugin.TextCommand):
def run(self, edit, formatted_dates=None):
regions = self.view.sel()
if formatted_dates and len(formatted_dates) >= len(regions):
for i in range(len(regions)):
self.view.replace(edit, regions[i], formatted_dates[i])
| gpl-2.0 | -1,456,843,975,556,486,700 | 31.831858 | 100 | 0.675472 | false |
pcournut/deep-learning-for-combinatorial-optimization | Google Pointer Net/decoder.py | 1 | 5801 | import tensorflow as tf
distr = tf.contrib.distributions
# RNN decoder with pointer net for the sequence-to-sequence model.
class pointer_decoder(object):
def __init__(self, encoder_output, cell, temperature, C, inference_mode, initializer):
self.encoder_output = encoder_output # Ref vectors to which attention is pointed: Tensor [Batch size x time steps x cell.state_size]
self.cell = cell # DECODER LSTM Cell
self.batch_size = encoder_output.get_shape()[0] # batch size
self.seq_length = encoder_output.get_shape()[1] # sequence length
self.n_hidden = cell.output_size # num_neurons
# Attending mechanism
with tf.variable_scope("glimpse") as glimpse:
self.W_ref_g =tf.get_variable("W_ref_g",[1,self.n_hidden,self.n_hidden],initializer=initializer)
self.W_q_g =tf.get_variable("W_q_g",[self.n_hidden,self.n_hidden],initializer=initializer)
self.v_g =tf.get_variable("v_g",[self.n_hidden],initializer=initializer)
# Pointing mechanism
with tf.variable_scope("pointer") as pointer:
self.W_ref =tf.get_variable("W_ref",[1,self.n_hidden,self.n_hidden],initializer=initializer)
self.W_q =tf.get_variable("W_q",[self.n_hidden,self.n_hidden],initializer=initializer)
self.v =tf.get_variable("v",[self.n_hidden],initializer=initializer)
self.mask = tf.zeros((self.batch_size,self.seq_length))
self.inference_mode = inference_mode # True for inference / False for training
self.temperature = temperature # temperature parameter
self.C = C # logit clip
self.log_softmax = [] # store log(p_theta(pi(t)|pi(<t),s)) for backprop
self.positions = [] # store visited cities for reward
# From a query (decoder output) [Batch size, n_hidden] and a set of reference (encoder_output) [Batch size, seq_length, n_hidden]
# predict a distribution over next decoder input
def attention(self,ref,query,temperature):
encoded_ref_g = tf.nn.conv1d(ref, self.W_ref_g, 1, "VALID", name="encoded_ref_g") # [Batch size, seq_length, n_hidden]
encoded_query_g = tf.expand_dims(tf.matmul(query, self.W_q_g, name="encoded_query_g"), 1) # [Batch size, 1, n_hidden]
scores_g = tf.reduce_sum(self.v_g * tf.tanh(encoded_ref_g + encoded_query_g), [-1], name="scores_g") # [Batch size, seq_length]
attention_g = tf.nn.softmax(scores_g,name="attention_g")
# 1 Glimpse = Linear combination of ref weighted by attention mask (or mask) = pointing mechanism query #########################################
glimpse = tf.multiply(ref, tf.expand_dims(attention_g,2))
glimpse = tf.reduce_sum(glimpse,1)
encoded_ref = tf.nn.conv1d(ref, self.W_ref, 1, "VALID", name="encoded_ref") # [Batch size, seq_length, n_hidden]
encoded_query = tf.expand_dims(tf.matmul(glimpse, self.W_q, name="encoded_query"), 1) # [Batch size, 1, n_hidden]
scores = tf.reduce_sum(self.v * tf.tanh(encoded_ref + encoded_query), [-1], name="scores") # [Batch size, seq_length]
attention = tf.nn.softmax(scores,name="attention") # [Batch size, Seq_length]
"""
if self.inference_mode == True:
attention = tf.nn.softmax(scores/temperature, name="attention") # [Batch size, Seq_length]
else:
attention = tf.nn.softmax(self.C*tf.tanh(scores), name="attention") # [Batch size, Seq_length]
"""
return attention, scores
# One pass of the decode mechanism
def decode(self,prev_state,prev_input,timestep):
with tf.variable_scope("loop"):
if timestep > 0:
tf.get_variable_scope().reuse_variables()
# Run the cell on a combination of the previous input and state
output,state=self.cell(prev_input,prev_state)
# Attention mechanism
distribution, scores=self.attention(self.encoder_output,output,self.temperature)
# Apply attention mask
masked_scores = scores - 100000000.*self.mask # [Batch size, seq_length]
# Multinomial distribution
prob = distr.Categorical(masked_scores)
# Sample from distribution
position = prob.sample()
position = tf.cast(position,tf.int32)
self.positions.append(position)
# Store log_prob for backprop
self.log_softmax.append(prob.log_prob(position))
# Update mask
self.mask = self.mask + tf.one_hot(position, self.seq_length)
# Retrieve decoder's new input
h = tf.transpose(self.encoder_output, [1, 0, 2]) # [Batch size x time steps x cell.state_size] to [time steps x Batch size x cell.state_size]
new_decoder_input = tf.gather(h,position)[0]
return state,new_decoder_input
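    # Illustrative shapes for one decode step (an assumption based on the shape
    # comments above, not part of the original file):
    #   prev_input:  [batch_size, n_hidden]   embedding of the previously visited city
    #   prev_state:  LSTM state tuple (c, h), each [batch_size, n_hidden]
    #   masked_scores / attention: [batch_size, seq_length]
    #   position:    [batch_size] int32 index of the next city to visit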
def loop_decode(self,decoder_initial_state,decoder_first_input):
# decoder_initial_state: Tuple Tensor (c,h) of size [batch_size x cell.state_size]
# decoder_first_input: Tensor [batch_size x cell.state_size]
# Loop the decoding process and collect results
s,i = decoder_initial_state,tf.cast(decoder_first_input,tf.float32)
for step in range(self.seq_length):
s,i = self.decode(s,i,step)
# Stack visited indices
self.positions=tf.stack(self.positions,axis=1)
# Sum log_softmax over output steps
self.log_softmax=tf.add_n(self.log_softmax) #tf.reduce_sum(self.log_softmax,0)
# Return stacked lists of visited_indices and log_softmax for backprop
return self.positions,self.log_softmax | mit | -3,904,509,820,302,967,000 | 47.177966 | 153 | 0.62472 | false |
SCECcode/BBP | bbp/comps/plot_seismograms.py | 1 | 14658 | #!/usr/bin/python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Broadband module to plot seismograms and overlay graphs
"""
from __future__ import division, print_function
# Import Python modules
import sys
import matplotlib as mpl
mpl.use("AGG", warn=False)
import pylab
# Import plot config file
import plot_config
# S-wave velocity in km/s
S_VELOCITY = 4
def calculate_x_coords(ts1, rrup):
"""
    Calculate the min_x and max_x points using the timestamps and
    rrup as references.
"""
plot_mode = plot_config.plot_seismograms_mode
max_ts = max(ts1)
if plot_mode == 2:
# Plot entire seismogram
return (0, max_ts)
if plot_mode == 1:
# Plot first "duration" seconds
return (0, plot_config.plot_seismograms_duration)
if max_ts <= plot_config.plot_seismograms_duration:
# Simulation is shorter than DURATION, plot everything
return (0, max_ts)
if rrup is None:
# R_rup not available, plot first DURATION seconds
return (0, min(max_ts, plot_config.plot_seismograms_duration))
# R_rup provided, use it to calculate plot window
min_x = float(rrup) / S_VELOCITY - 20.0
if min_x < 0:
min_x = 0
max_x = min_x + plot_config.plot_seismograms_duration
return (min_x, max_x)
def plot_seis(stat, filename, label, units, outfile, rrup=None):
"""
Plots the seismogram for station stat, and outputs a png file outfile
"""
ts1 = []
ns1 = []
ew1 = []
ud1 = []
cmt1 = ["", ""]
# Read input file
input_file = open(filename, 'r')
for data in input_file:
# Remove leading spaces
data = data.strip()
# Skip comments
if data.startswith('#') or data.startswith('%'):
if cmt1[0] == "":
cmt1[0] = data
else:
tmp = []
tmp = data.split()
ts1.append(float(tmp[0]))
ns1.append(float(tmp[1]))
ew1.append(float(tmp[2]))
ud1.append(float(tmp[3]))
# Don't forget to close the file
input_file.close()
min_x, max_x = calculate_x_coords(ts1, rrup)
min_horiz_y = 1.1 * min([min(ns1), min(ew1)])
max_horiz_y = 1.1 * max([max(ns1), max(ew1)])
min_vert_y = 1.1 * min(ud1)
max_vert_y = 1.1 * max(ud1)
pylab.clf()
pylab.suptitle('Run %s, station %s' %
(label, stat), size=14)
pylab.subplots_adjust(top=0.925)
pylab.subplots_adjust(bottom=0.07)
pylab.subplots_adjust(left=0.11)
pylab.subplots_adjust(right=0.975)
pylab.subplots_adjust(hspace=0.5)
pylab.subplots_adjust(wspace=0.3)
ax1 = pylab.subplot(311)
pylab.plot(ts1, ns1, lw=plot_config.line_width)
pylab.xlim(min_x, max_x)
pylab.ylim(min_horiz_y, max_horiz_y)
if units == 'vel':
pylab.ylabel("Velocity (cm/s)")
elif units == 'acc':
pylab.ylabel("Acceleration (cm/s/s)")
pylab.xlabel("Time (s)")
ax1.set_title("N/S", fontsize="small")
ax2 = pylab.subplot(312)
pylab.plot(ts1, ew1, lw=plot_config.line_width)
pylab.xlim(min_x, max_x)
pylab.ylim(min_horiz_y, max_horiz_y)
if units == 'vel':
pylab.ylabel("Velocity (cm/s)")
elif units == 'acc':
pylab.ylabel("Acceleration (cm/s/s)")
pylab.xlabel("Time (s)")
ax2.set_title("E/W", fontsize="small")
ax3 = pylab.subplot(313)
pylab.plot(ts1, ud1, lw=plot_config.line_width)
pylab.xlim(min_x, max_x)
pylab.ylim(min_vert_y, max_vert_y)
if units == 'vel':
pylab.ylabel("Velocity (cm/s)")
elif units == 'acc':
pylab.ylabel("Acceleration (cm/s/s)")
pylab.xlabel("Time (s)")
ax3.set_title("U/D", fontsize="small")
pylab.gcf().set_size_inches(6, 7)
#pylab.tight_layout()
pylab.savefig(outfile, format="png", dpi=plot_config.dpi)
pylab.close()
def read_seismogram_file(filename):
"""
    This function reads a seismogram file and returns 4 lists: the timestamps,
    the horizontal components (ns and ew), and the vertical component (ud)
"""
# Start empty
ts = []
ns = []
ew = []
ud = []
# Read file
seis_file = open(filename, 'r')
for line in seis_file:
# Remove leading spaces
line = line.strip()
# Skip comments
if line.startswith('#') or line.startswith('%'):
continue
tmp = line.split()
if len(tmp) < 4:
print("Error reading seismogram in file %s" % (filename))
sys.exit(1)
ts.append(float(tmp[0]))
ns.append(float(tmp[1]))
ew.append(float(tmp[2]))
ud.append(float(tmp[3]))
# Close file
seis_file.close()
# All done
return (ts, ns, ew, ud)
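# Illustrative input for read_seismogram_file (an assumption based on the parsing
# above, not part of the original file): comment lines start with '#' or '%',
# data lines hold whitespace-separated "time NS EW UD" samples, e.g.
#
#     # station ABC, velocity seismogram
#     0.000   0.0012  -0.0008   0.0001
#     0.025   0.0031  -0.0016   0.0003
#
#     ts, ns, ew, ud = read_seismogram_file("station.bbp")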
def plot_overlay(stat, obs_filename, comp_filename, obs_label, comp_label,
outfile, y_label="Velocity (cm/s)",
goflabel=None, gofdata=None):
"""
This function plots observed and computed seismograms side by side
for easy comparison
"""
# Initialize variables
textx = 0.53
texty = 0.05
fig = pylab.plt.figure()
fig.clf()
ts1, ns1, ew1, ud1 = read_seismogram_file(obs_filename)
ts2, ns2, ew2, ud2 = read_seismogram_file(comp_filename)
# Determine min and max X and Y for N/S/E/W/U/D for scaling
min_x = 0
max_x = min(max([max(ts1), max(ts2)]), 100)
min_horiz_y = 1.1 * min([min(ns1), min(ns2), min(ew1), min(ew2)])
max_horiz_y = 1.1 * max([max(ns1), max(ns2), max(ew1), max(ew2)])
# Adjust so min and max are equal
if abs(min_horiz_y) > abs(max_horiz_y):
max_horiz_y = -1 * min_horiz_y
else:
min_horiz_y = -1 * max_horiz_y
min_vert_y = 1.1 * min([min(ud1), min(ud2)])
max_vert_y = 1.1 * max([max(ud1), max(ud2)])
if abs(min_vert_y) > abs(max_vert_y):
max_vert_y = -1 * min_vert_y
else:
min_vert_y = -1 * max_vert_y
if goflabel is None or gofdata is None:
fig.suptitle('%s vs %s, station %s' % (obs_label, comp_label, stat), size=14)
else:
txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[0])
fig.suptitle('%s vs %s, station %s (%s)' %
(obs_label, comp_label, stat, txt), size=14)
fig.subplots_adjust(top=0.85)
fig.subplots_adjust(left=0.075)
fig.subplots_adjust(right=0.925)
fig.subplots_adjust(hspace=0.3)
fig.subplots_adjust(wspace=0.3)
ax = fig.add_subplot(231, title='%s, N/S' % obs_label)
ax.plot(ts1, ns1, color='black', label=obs_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_horiz_y, max_horiz_y)
ax.set_ylabel(y_label)
ax = fig.add_subplot(234, title='%s, N/S' % comp_label)
ax.plot(ts2, ns2, color='red', label=comp_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_horiz_y, max_horiz_y)
ax.set_ylabel(y_label)
if goflabel is not None and gofdata is not None:
txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[2])
ax.text(textx, texty, txt, transform=ax.transAxes,
bbox=dict(facecolor='red', alpha=0.5))
#legend(prop=matplotlib.font_manager.FontProperties(size=10))
ax = fig.add_subplot(232, title='%s, E/W' % obs_label)
ax.plot(ts1, ew1, color='black', label=obs_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_horiz_y, max_horiz_y)
#ylabel(y_label)
ax = fig.add_subplot(235, title='%s, E/W' % comp_label)
ax.plot(ts2, ew2, color='red', label=comp_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_horiz_y, max_horiz_y)
if goflabel is not None and gofdata is not None:
txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[1])
ax.text(textx, texty, txt, transform=ax.transAxes,
bbox=dict(facecolor='red', alpha=0.5))
#ylabel(y_label)
#legend(prop=matplotlib.font_manager.FontProperties(size=10))
ax = fig.add_subplot(233, title='%s, U/D' % obs_label)
ax.plot(ts1, ud1, color='black', label=obs_label,
lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_vert_y, max_vert_y)
#ylabel(y_label)
ax = fig.add_subplot(236, title='%s, U/D' % comp_label)
ax.plot(ts2, ud2, color='red', label=comp_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_vert_y, max_vert_y)
if goflabel is not None and gofdata is not None:
txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[3])
ax.text(textx, texty, txt, transform=ax.transAxes,
bbox=dict(facecolor='red', alpha=0.5))
#ylabel(y_label)
#legend(prop=matplotlib.font_manager.FontProperties(size=10))
pylab.gcf().set_size_inches(10, 5)
pylab.savefig(outfile, format="png", dpi=plot_config.dpi)
pylab.close()
def plot_overlay_with_arias(stat, obs_filename, comp_filename,
obs_arias_n_filename, obs_arias_e_filename,
obs_arias_z_filename, comp_arias_n_filename,
comp_arias_e_filename, comp_arias_z_filename,
obs_label, comp_label, outfile, rrup=None,
y_label="Velocity (cm/s)",
goflabel=None, gofdata=None):
"""
This function plots observed and computed seismograms side by side
for easy comparison
"""
# Initialize variables
textx = 0.53
texty = 0.05
fig = pylab.plt.figure()
fig.clf()
# Read all files
(ts1, ns1, ew1, ud1) = read_seismogram_file(obs_filename)
(ts2, ns2, ew2, ud2) = read_seismogram_file(comp_filename)
ta1, _, _, an1 = read_seismogram_file(obs_arias_n_filename)
ta1, _, _, ae1 = read_seismogram_file(obs_arias_e_filename)
ta1, _, _, az1 = read_seismogram_file(obs_arias_z_filename)
ta2, _, _, an2 = read_seismogram_file(comp_arias_n_filename)
ta2, _, _, ae2 = read_seismogram_file(comp_arias_e_filename)
ta2, _, _, az2 = read_seismogram_file(comp_arias_z_filename)
# Determine min and max X and Y for N/S/E/W/U/D for scaling
min_x = 0
#max_x = min(max([max(ts1), max(ts2)]), 100)
max_x = max([max(ts1), max(ts2)])
min_horiz_y = 1.1 * min([min(ns1), min(ns2), min(ew1), min(ew2)])
max_horiz_y = 1.1 * max([max(ns1), max(ns2), max(ew1), max(ew2)])
# Adjust so min and max are equal
if abs(min_horiz_y) > abs(max_horiz_y):
max_horiz_y = -1 * min_horiz_y
else:
min_horiz_y = -1 * max_horiz_y
min_vert_y = 1.1 * min([min(ud1), min(ud2)])
max_vert_y = 1.1 * max([max(ud1), max(ud2)])
if abs(min_vert_y) > abs(max_vert_y):
max_vert_y = -1 * min_vert_y
else:
min_vert_y = -1 * max_vert_y
# For arias plots, min=0, max=100%
min_y_arias = 0
max_y_arias = 100
if goflabel is None or gofdata is None:
fig.suptitle('%s vs %s, station %s' % (obs_label, comp_label, stat), size=14)
else:
txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[0])
fig.suptitle('%s vs %s, station %s (%s)' %
(obs_label, comp_label, stat, txt), size=14)
fig.subplots_adjust(top=0.915)
fig.subplots_adjust(left=0.075)
fig.subplots_adjust(right=0.975)
fig.subplots_adjust(bottom=0.07)
fig.subplots_adjust(hspace=0.4)
fig.subplots_adjust(wspace=0.2)
ax = fig.add_subplot(321)
ax.plot(ts1, ns1, color='black', label=obs_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_horiz_y, max_horiz_y)
ax.set_title("Observation N/S", fontsize='small')
ax.set_ylabel(y_label)
ax.set_xlabel("Time (s)")
ax = fig.add_subplot(323)
ax.plot(ts2, ns2, color='red', label=comp_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_horiz_y, max_horiz_y)
ax.set_title("Simulation N/S", fontsize='small')
ax.set_ylabel(y_label)
ax.set_xlabel("Time (s)")
if goflabel is not None and gofdata is not None:
txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[2])
ax.text(textx, texty, txt, transform=ax.transAxes,
bbox=dict(facecolor='red', alpha=0.5))
ax = fig.add_subplot(322)
ax.plot(ts1, ew1, color='black', label=obs_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_horiz_y, max_horiz_y)
ax.set_title("Observation E/W", fontsize='small')
ax.set_ylabel(y_label)
ax.set_xlabel("Time (s)")
ax = fig.add_subplot(324)
ax.plot(ts2, ew2, color='red', label=comp_label, lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_horiz_y, max_horiz_y)
ax.set_title("Simulation E/W", fontsize='small')
ax.set_ylabel(y_label)
ax.set_xlabel("Time (s)")
if goflabel is not None and gofdata is not None:
txt = '$%s_{%s}$=%.1f %%' % (goflabel[0], goflabel[1], gofdata[1])
ax.text(textx, texty, txt, transform=ax.transAxes,
bbox=dict(facecolor='red', alpha=0.5))
ax = fig.add_subplot(325, title='N/S')
ax.plot(ta1, an1, color='black', lw=plot_config.line_width)
ax.plot(ta2, an2, color='red', lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y_arias, max_y_arias)
ax.set_title("N/S", fontsize='small')
ax.set_ylabel("Norm Arias Int (%)")
ax.set_xlabel("Time (s)")
ax = fig.add_subplot(326, title='E/W')
ax.plot(ta1, ae1, color='black', lw=plot_config.line_width)
ax.plot(ta2, ae2, color='red', lw=plot_config.line_width)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y_arias, max_y_arias)
ax.set_title("E/W", fontsize='small')
ax.set_ylabel("Norm Arias Int (%)")
ax.set_xlabel("Time (s)")
pylab.gcf().set_size_inches(10, 7.5)
pylab.savefig(outfile, format="png", dpi=plot_config.dpi)
pylab.close()
if __name__ == '__main__':
plot_overlay(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
sys.argv[5], sys.argv[6])
#plot_seis(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
| apache-2.0 | -7,339,043,354,509,569,000 | 35.014742 | 85 | 0.598786 | false |
thorrak/fermentrack | gravity/tilt/TiltHydrometer.py | 1 | 11713 | import datetime
from typing import List, Dict, TYPE_CHECKING
from collections import deque
from gravity.models import TiltConfiguration, GravityLogPoint, GravitySensor
# from asgiref.sync import sync_to_async
class TiltHydrometer(object):
# These are all the UUIDs currently available as Tilt colors
tilt_colors = {
'Red': "a495bb10-c5b1-4b44-b512-1370f02d74de",
'Green': "a495bb20-c5b1-4b44-b512-1370f02d74de",
'Black': "a495bb30-c5b1-4b44-b512-1370f02d74de",
'Purple': "a495bb40-c5b1-4b44-b512-1370f02d74de",
'Orange': "a495bb50-c5b1-4b44-b512-1370f02d74de",
'Blue': "a495bb60-c5b1-4b44-b512-1370f02d74de",
'Yellow': "a495bb70-c5b1-4b44-b512-1370f02d74de",
'Pink': "a495bb80-c5b1-4b44-b512-1370f02d74de",
} # type: Dict[str, str]
# color_lookup is created at first use in color_lookup
color_lookup_table = {} # type: Dict[str, str]
color_lookup_table_no_dash = {} # type: Dict[str, str]
def __init__(self, color: str):
self.color = color # type: str
# The smoothing_window is set in the TiltConfiguration object - just defaulting it here for now
self.smoothing_window = 60 # type: int
self.gravity_list = deque(maxlen=self.smoothing_window) # type: deque[float]
self.temp_list = deque(maxlen=self.smoothing_window) # type: deque[int]
self.last_value_received = datetime.datetime.now() - self._cache_expiry_seconds() # type: datetime.datetime
self.last_saved_value = datetime.datetime.now() # type: datetime.datetime
self.gravity = 0.0 # type: float
self.raw_gravity = 0.0 # type: float
# Note - temp is always in fahrenheit
self.temp = 0 # type: int
self.raw_temp = 0 # type: int
self.rssi = 0 # type: int
# v3 and newer Tilts use the tx_pwr field to send the battery life
self.sends_battery = False # type: bool
self.weeks_on_battery = 0 # type: int
self.firmware_version = 0
# Tilt Pros are determined when we receive a gravity reading > 5000
self.tilt_pro = False # type: bool
self.obj = None # type: TiltConfiguration
# Let's load the object from Fermentrack as part of the initialization
self.load_obj_from_fermentrack()
if self.obj is not None:
self.temp_format = self.obj.sensor.temp_format
else:
self.temp_format = GravitySensor.TEMP_FAHRENHEIT # Defaulting to Fahrenheit as that's what the Tilt sends
def __str__(self):
return self.color
def _cache_expiry_seconds(self) -> datetime.timedelta:
# Assume we get 1 out of every 4 readings
return datetime.timedelta(seconds=(self.smoothing_window * 1.2 * 4))
def _cache_expired(self) -> bool:
if self.obj is not None:
# The other condition we want to explicitly clear the cache is if the temp format has changed between what
# was loaded from the sensor object & what we previously had cached when the object was loaded
if self.temp_format != self.obj.sensor.temp_format:
# Clear the cached temp/gravity values &
self.temp_format = self.obj.sensor.temp_format # Cache the new temp format
return True
return self.last_value_received <= datetime.datetime.now() - self._cache_expiry_seconds()
def _add_to_list(self, gravity, temp):
# This adds a gravity/temp value to the list for smoothing/averaging
if self._cache_expired():
# The cache expired (we lost contact with the Tilt for too long). Clear the lists.
self.gravity_list.clear()
self.temp_list.clear()
# Thankfully, deque enforces queue length, so all we need to do is add the value
self.last_value_received = datetime.datetime.now()
self.gravity_list.append(gravity)
self.temp_list.append(temp)
def should_save(self) -> bool:
if self.obj is None:
return False
return self.last_saved_value <= datetime.datetime.now() - datetime.timedelta(seconds=(self.obj.polling_frequency))
# def process_ibeacon_info(self, ibeacon_info: IBeaconAdvertisement, rssi):
# self.raw_gravity = ibeacon_info.minor / 1000
# if self.obj is None:
# # If there is no TiltConfiguration object set, just use the raw gravity the Tilt provided
# self.gravity = self.raw_gravity
# else:
# # Otherwise, apply the calibration
# self.gravity = self.obj.apply_gravity_calibration(self.raw_gravity)
#
# # Temps are always provided in degrees fahrenheit - Convert to Celsius if required
# # Note - convert_temp_to_sensor returns as a tuple (with units) - we only want the degrees not the units
# self.raw_temp, _ = self.obj.sensor.convert_temp_to_sensor_format(ibeacon_info.major,
# GravitySensor.TEMP_FAHRENHEIT)
# self.temp = self.raw_temp
# self.rssi = rssi
# self._add_to_list(self.gravity, self.temp)
def process_decoded_values(self, sensor_gravity: int, sensor_temp: int, rssi: int, tx_pwr: int):
if sensor_temp == 999:
# For the latest Tilts, this is now actually a special code indicating that the gravity is the version info.
# Regardless of whether or not we end up doing anything with that information, we definitely do not want to
# add it to the list
self.firmware_version = sensor_gravity
return
if sensor_gravity >= 5000:
# Tilt Pro support
self.tilt_pro = True
self.raw_gravity = sensor_gravity / 10000
usable_temp = sensor_temp / 10
else:
# Tilt "Classic" support
self.tilt_pro = False
self.raw_gravity = sensor_gravity / 1000
usable_temp = sensor_temp
# v3 Tilts send battery age in weeks using the tx_pwr field, but they have a hack in place to maintain
# compatibility with iPhones where they alternate sending "197" (unsigned) or "-59" (signed) with the actual
# number of weeks since the battery was changed. If we see the 197 (-59) then we'll set "sends_battery" to true
# and then update the weeks_on_battery the next time we see a beacon
if tx_pwr == 197:
self.sends_battery = True
elif self.sends_battery:
self.weeks_on_battery = tx_pwr
if self.obj is None:
# If there is no TiltConfiguration object set, just use the raw gravity the Tilt provided
self.gravity = self.raw_gravity
self.raw_temp = usable_temp
else:
# Otherwise, apply the calibration
self.gravity = self.obj.apply_gravity_calibration(self.raw_gravity)
# Temps are always provided in degrees fahrenheit - Convert to Celsius if required
# Note - convert_temp_to_sensor returns as a tuple (with units) - we only want the degrees not the units
self.raw_temp, _ = self.obj.sensor.convert_temp_to_sensor_format(usable_temp,
GravitySensor.TEMP_FAHRENHEIT)
self.temp = self.raw_temp
self.rssi = rssi
self._add_to_list(self.gravity, self.temp)
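    # Illustrative decoding (not part of the original file; assumes no
    # TiltConfiguration object is loaded, so raw values are used directly):
    #
    #     t = TiltHydrometer('Red')
    #     t.process_decoded_values(sensor_gravity=10123, sensor_temp=685,
    #                              rssi=-67, tx_pwr=197)
    #     # Tilt Pro packet: t.tilt_pro is True, t.raw_gravity == 1.0123,
    #     # temperature decoded from 68.5 F, and battery reporting is flagged.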
def smoothed_gravity(self):
# Return the average gravity in gravity_list
if len(self.gravity_list) <= 0:
return None
grav_total = 0
for grav in self.gravity_list:
grav_total += grav
return round(grav_total / len(self.gravity_list), 3) # Average it out & round
def smoothed_temp(self):
# Return the average temp in temp_list
if len(self.temp_list) <= 0:
return None
temp_total = 0
for temp in self.temp_list:
temp_total += temp
return round(temp_total / len(self.temp_list), 3) # Average it out & round
@classmethod
def color_lookup(cls, color):
if len(cls.color_lookup_table) <= 0:
cls.color_lookup_table = {cls.tilt_colors[x]: x for x in cls.tilt_colors}
if len(cls.color_lookup_table_no_dash) <= 0:
cls.color_lookup_table_no_dash = {cls.tilt_colors[x].replace("-",""): x for x in cls.tilt_colors}
if color in cls.color_lookup_table:
return cls.color_lookup_table[color]
elif color in cls.color_lookup_table_no_dash:
return cls.color_lookup_table_no_dash[color]
else:
return None
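    # Illustrative usage (not part of the original file):
    #
    #     TiltHydrometer.color_lookup("a495bb10-c5b1-4b44-b512-1370f02d74de")   # 'Red'
    #     TiltHydrometer.color_lookup("a495bb30c5b14b44b5121370f02d74de")       # 'Black'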
def print_data(self):
print("{} Tilt: {} ({}) / {} F".format(self.color, self.smoothed_gravity(), self.gravity, self.temp))
# @sync_to_async
def load_obj_from_fermentrack(self, obj: TiltConfiguration = None):
if obj is None:
# If we weren't handed the object itself, try to load it
try:
obj = TiltConfiguration.objects.get(color=self.color,
connection_type=TiltConfiguration.CONNECTION_BLUETOOTH)
except:
# TODO - Rewrite this slightly
self.obj = None
return False
# If the smoothing window changed, just recreate the deque objects
if obj.smoothing_window_vals != self.smoothing_window:
self.smoothing_window = obj.smoothing_window_vals
self.gravity_list = deque(maxlen=self.smoothing_window)
self.temp_list = deque(maxlen=self.smoothing_window)
self.obj = obj
# @sync_to_async
def save_value_to_fermentrack(self, verbose=False):
if self.obj is None:
# If we don't have a TiltConfiguration object loaded, we can't save the data point
if verbose:
print("{} Tilt: No object loaded for this color".format(self.color))
return False
if self._cache_expired():
if verbose:
print("{} Tilt: Cache is expired/No data available to save".format(self.color))
return False
if self.smoothed_gravity() is None or self.smoothed_temp() is None:
if verbose:
print("{} Tilt: No data available to save".format(self.color))
return False
# TODO - Test that temp_format actually works as intended here
new_point = GravityLogPoint(
gravity=self.smoothed_gravity(),
gravity_latest=self.gravity,
temp=self.smoothed_temp(),
temp_latest=self.temp,
temp_format=self.obj.sensor.temp_format,
temp_is_estimate=False,
associated_device=self.obj.sensor,
)
if self.obj.sensor.active_log is not None:
new_point.associated_log = self.obj.sensor.active_log
new_point.save()
# Also, set/save the RSSI/Raw Temp/Raw Gravity so we can load it for debugging
self.obj.rssi = self.rssi
self.obj.raw_gravity = self.raw_gravity
self.obj.raw_temp = self.raw_temp
self.obj.tilt_pro = self.tilt_pro
self.obj.sends_battery = self.sends_battery
self.obj.weeks_on_battery = self.weeks_on_battery
self.obj.firmware_version = self.firmware_version
self.obj.save_extras_to_redis()
self.last_saved_value = datetime.datetime.now()
if verbose:
print("{} Tilt: Logging {}".format(self.color, self.smoothed_gravity()))
else:
if verbose:
print("No data received.") | mit | -2,648,578,538,299,808,000 | 42.546468 | 122 | 0.61086 | false |
prefetchnta/questlab | bin/x64bin/python/37/Lib/fnmatch.py | 1 | 4184 | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
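# Illustrative usage (not part of the original module):
#
#     fnmatchcase('report.txt', '*.txt')            # -> True
#     filter(['a.py', 'b.txt', 'c.py'], '*.py')     # -> ['a.py', 'c.py']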
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j]
if '--' not in stuff:
stuff = stuff.replace('\\', r'\\')
else:
chunks = []
k = i+2 if pat[i] == '!' else i+1
while True:
k = pat.find('-', k, j)
if k < 0:
break
chunks.append(pat[i:k])
i = k+1
k = k+3
chunks.append(pat[i:j])
# Escape backslashes and hyphens for set difference (--).
# Hyphens that create ranges shouldn't be escaped.
stuff = '-'.join(s.replace('\\', r'\\').replace('-', r'\-')
for s in chunks)
# Escape set operations (&&, ~~ and ||).
stuff = re.sub(r'([&~|])', r'\\\1', stuff)
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] in ('^', '['):
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return r'(?s:%s)\Z' % res
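# Illustrative translations (not part of the original module):
#
#     translate('*.txt')        # -> r'(?s:.*\.txt)\Z'
#     translate('data_?[0-9]')  # -> r'(?s:data_.[0-9])\Z'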
| lgpl-2.1 | 1,415,220,731,511,954,200 | 30.6875 | 79 | 0.487094 | false |
anqxyr/pyscp | pyscp/orm.py | 1 | 5732 | #!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import concurrent.futures
import logging
import peewee
import queue
from itertools import islice
###############################################################################
# Global Constants And Variables
###############################################################################
log = logging.getLogger('pyscp.orm')
pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
queue = queue.Queue()
def queue_execution(fn, args=(), kw={}):
queue.put(dict(fn=fn, args=args, kw=kw))
pool.submit(async_write)
###############################################################################
# Database ORM Classes
###############################################################################
db = peewee.Proxy()
class BaseModel(peewee.Model):
class Meta:
database = db
@classmethod
def create(cls, **kw):
queue_execution(fn=super().create, kw=kw)
@classmethod
def create_table(cls):
if not hasattr(cls, '_id_cache'):
cls._id_cache = []
queue_execution(fn=super().create_table, args=(True,))
@classmethod
def insert_many(cls, data):
data_iter = iter(data)
chunk = list(islice(data_iter, 500))
while chunk:
queue_execution(
fn=lambda x: super(BaseModel, cls).insert_many(x).execute(),
args=(chunk, ))
chunk = list(islice(data_iter, 500))
@classmethod
def convert_to_id(cls, data, key='user'):
for row in data:
if row[key] not in cls._id_cache:
cls._id_cache.append(row[key])
row[key] = cls._id_cache.index(row[key]) + 1
yield row
@classmethod
def write_ids(cls, field_name):
cls.insert_many([
{'id': cls._id_cache.index(value) + 1, field_name: value}
for value in set(cls._id_cache)])
cls._id_cache.clear()
class ForumCategory(BaseModel):
title = peewee.CharField()
description = peewee.TextField()
class ForumThread(BaseModel):
category = peewee.ForeignKeyField(ForumCategory, null=True)
title = peewee.CharField(null=True)
description = peewee.TextField(null=True)
class Page(BaseModel):
url = peewee.CharField(unique=True)
html = peewee.TextField()
thread = peewee.ForeignKeyField(
ForumThread, related_name='page', null=True)
class User(BaseModel):
name = peewee.CharField(unique=True)
class Revision(BaseModel):
page = peewee.ForeignKeyField(Page, related_name='revisions', index=True)
user = peewee.ForeignKeyField(User, related_name='revisions', index=True)
number = peewee.IntegerField()
time = peewee.DateTimeField()
comment = peewee.CharField(null=True)
class Vote(BaseModel):
page = peewee.ForeignKeyField(Page, related_name='votes', index=True)
user = peewee.ForeignKeyField(User, related_name='votes', index=True)
value = peewee.IntegerField()
class ForumPost(BaseModel):
thread = peewee.ForeignKeyField(
ForumThread, related_name='posts', index=True)
user = peewee.ForeignKeyField(User, related_name='posts', index=True)
parent = peewee.ForeignKeyField('self', null=True)
title = peewee.CharField(null=True)
time = peewee.DateTimeField()
content = peewee.TextField()
class Tag(BaseModel):
name = peewee.CharField(unique=True)
class PageTag(BaseModel):
page = peewee.ForeignKeyField(Page, related_name='tags', index=True)
tag = peewee.ForeignKeyField(Tag, related_name='pages', index=True)
class OverrideType(BaseModel):
name = peewee.CharField(unique=True)
class Override(BaseModel):
url = peewee.ForeignKeyField(Page, to_field=Page.url, index=True)
user = peewee.ForeignKeyField(User, index=True)
type = peewee.ForeignKeyField(OverrideType)
class ImageStatus(BaseModel):
name = peewee.CharField(unique=True)
class Image(BaseModel):
url = peewee.CharField(unique=True)
source = peewee.CharField()
data = peewee.BlobField()
status = peewee.ForeignKeyField(ImageStatus)
notes = peewee.TextField(null=True)
###############################################################################
# Helper Functions
###############################################################################
def async_write(buffer=[]):
item = queue.get()
buffer.append(item)
if len(buffer) > 500 or queue.empty():
log.debug('Processing {} queue items.'.format(len(buffer)))
with db.transaction():
write_buffer(buffer)
buffer.clear()
def write_buffer(buffer):
for item in buffer:
try:
item['fn'](*item.get('args', ()), **item.get('kw', {}))
except:
log.exception(
'Exception while processing queue item: {}'
.format(item))
queue.task_done()
def create_tables(*tables):
for table in tables:
eval(table).create_table()
def connect(dbpath):
log.info('Connecting to the database at {}'.format(dbpath))
db.initialize(peewee.SqliteDatabase(dbpath))
db.connect()
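# Illustrative setup (not part of the original module; the path and table names
# are placeholders):
#
#     connect('/tmp/scp.db')
#     create_tables('Page', 'User', 'Revision', 'Vote', 'Tag', 'PageTag')
#     # writes are queued and flushed asynchronously by the worker thread above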
###############################################################################
# Macros
###############################################################################
def votes_by_user(user):
up, down = [], []
for vote in (Vote.select().join(User).where(User.name == user)):
if vote.value == 1:
up.append(vote.page.url)
else:
down.append(vote.page.url)
return {'+': up, '-': down}
| mit | -6,637,355,382,847,117,000 | 27.66 | 79 | 0.552512 | false |
blueboxjesse/ursula | roles/horizon/templates/opt/stack/horizon/openstack_dashboard/local/local_settings.py | 1 | 8245 | import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
DEBUG = False
TEMPLATE_DEBUG = DEBUG
COMPRESS_OFFLINE = True
ALLOWED_HOSTS = ['*']
# Set SSL proxy settings:
# For Django 1.4+ pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
#CSRF_COOKIE_SECURE = True
#SESSION_COOKIE_SECURE = True
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be "2.0" or "3".
# OPENSTACK_API_VERSIONS = {
# "identity": 3
# }
# Set this to True if running on multi-domain model. When this is enabled, it
# will require user to enter the Domain name in addition to username for login.
# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
# Default OpenStack Dashboard configuration.
HORIZON_CONFIG = {
'dashboards': ('project', 'admin', 'settings',),
'default_dashboard': 'project',
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
}
# Specify a regular expression to validate user passwords.
# HORIZON_CONFIG["password_validator"] = {
# "regex": '.*',
# "help_text": _("Your password does not meet the requirements.")
# }
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
# HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for the login form if so desired.
# HORIZON_CONFIG["password_autocomplete"] = "off"
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# Set custom secret key:
# You can either set it to a specific value or you can let Horizon generate a
# default secret key that is unique on this machine, i.e. regardless of the
# number of Python WSGI workers (if used behind Apache+mod_wsgi); however, there
# may be situations where you would want to set this explicitly, e.g. when
# multiple dashboard instances are distributed on different machines (usually
# behind a load-balancer). Either you have to make sure that a session gets all
# requests routed to the same dashboard instance or you set the same SECRET_KEY
# for all of them.
from horizon.utils import secret_key
SECRET_KEY = "{{ secrets.horizon_secret_key }}"
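# As an illustrative alternative (a sketch, not used by this template), the
# secret_key helper imported above can derive a per-machine key instead of a
# templated secret, e.g.:
# SECRET_KEY = secret_key.generate_or_read_from_file(
#     os.path.join(LOCAL_PATH, '.secret_key_store'))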
{% macro memcached_hosts() -%}
{% for host in groups['controller'] -%}
{% if loop.last -%}
'{{ hostvars[host][primary_interface]['ipv4']['address'] }}:{{ memcached.port }}'
{%- else -%}
'{{ hostvars[host][primary_interface]['ipv4']['address'] }}:{{ memcached.port }}',
{%- endif -%}
{% endfor -%}
{% endmacro -%}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION' : [
{{ memcached_hosts() }}
]
}
}
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
# EMAIL_HOST = 'smtp.my-company.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'djangomail'
# EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
# AVAILABLE_REGIONS = [
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
# ]
{% if horizon.keystone_api_version == 3 -%}
AVAILABLE_REGIONS = [
('https://{{ endpoints.main }}:5001/v3', 'RegionOne'),
]
{% endif -%}
OPENSTACK_HOST = "{{ endpoints.main }}"
OPENSTACK_KEYSTONE_URL = "https://%s:5001/v{{horizon.keystone_api_version}}" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "service"
# Disable SSL certificate checks (useful for self-signed certificates):
OPENSTACK_SSL_NO_VERIFY = True
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_project': True,
'can_edit_domain': True
}
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
# NOTE: as of Grizzly this is not yet supported in Nova so enabling this
# setting will not do anything useful
'can_encrypt_volumes': False
}
# The OPENSTACK_QUANTUM_NETWORK settings can be used to enable optional
# services provided by quantum. Currently only the load balancer service
# is available.
#OPENSTACK_QUANTUM_NETWORK = {
# 'enable_lb': True
#}
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'internalURL'.
#OPENSTACK_ENDPOINT_TYPE = "publicURL"
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
# Set the level to "DEBUG" for verbose output logging.
'level': 'INFO',
'class': 'logging.StreamHandler',
},
},
'loggers': {
# Logging from django.db.backends is VERY verbose, send to null
# by default.
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
},
'requests': {
'handlers': ['null'],
'propagate': False,
},
'horizon': {
'handlers': ['console'],
'propagate': False,
},
'openstack_dashboard': {
'handlers': ['console'],
'propagate': False,
},
'novaclient': {
'handlers': ['console'],
'propagate': False,
},
'cinderclient': {
'handlers': ['console'],
'propagate': False,
},
'keystoneclient': {
'handlers': ['console'],
'propagate': False,
},
'glanceclient': {
'handlers': ['console'],
'propagate': False,
},
'nose.plugins.manager': {
'handlers': ['console'],
'propagate': False,
}
}
}
| mit | 7,761,227,557,051,652,000 | 33.78903 | 93 | 0.662947 | false |
lmaurits/BEASTling | beastling/models/binary.py | 1 | 17129 | import collections
from .basemodel import BaseModel
from beastling.util import xml
from beastling.util import log
class BinaryModel(BaseModel):
def __init__(self, model_config, global_config):
BaseModel.__init__(self, model_config, global_config)
self.remove_constant_features = model_config.remove_constant_features
self.gamma_categories = int(model_config.options.get("gamma_categories", 0))
# Compute feature properties early to facilitate auto-detection of binarisation
self.compute_feature_properties()
# Don't need a separating comma because each datapoint is a string
# of length 1
self.data_separator = ""
self.binarised = model_config.binarised
# Do we need to recode multistate data?
self.recoded = any(self.valuecounts[f] > 2 for f in self.features)
# Check for inconsistent configuration
if self.recoded and self.binarised:
raise ValueError("Data for model '%s' contains features with more than two states, but binarised=True was given. Have you specified the correct data file or feature list?" % self.name)
def add_state(self, state):
BaseModel.add_state(self, state)
if self.gamma_categories > 0:
xml.parameter(state, text="1.0", id="gammaShape.s:%s" % self.name, name="stateNode")
def add_frequency_state(self, state):
attribs = {
"id":"freqs_param.s:%s" % self.name,
"spec":"parameter.RealParameter",
"dimension":"2",
"lower":"0.0",
"upper":"1.0",
}
if self.share_params:
xml.stateNode(state, text="0.5 0.5", attrib=attribs)
else:
for f in self.features:
fname = "%s:%s" % (self.name, f)
attribs["id"] = "freqs_param.s:%s" % fname
xml.stateNode(state, text="0.5 0.5", attrib=attribs)
def add_sitemodel(self, distribution, feature, fname):
if feature == None and fname == None:
mr = "1.0"
id_ = "SiteModel.%s" % self.name
else:
mr = self.get_mutation_rate(feature, fname)
id_ = "SiteModel.%s" % fname
attribs = { "id":id_,
"spec":"SiteModel",
"mutationRate":mr,
"proportionInvariant":"0"
}
if self.gamma_categories > 0:
attribs["gammaCategoryCount"] = str(self.gamma_categories)
attribs["shape"] = "@gammaShape.s:%s" % self.name
sitemodel = xml.siteModel(distribution, attrib=attribs)
self.add_substmodel(sitemodel, feature, fname)
def compute_weights(self):
if not self.recoded:
BaseModel.compute_weights(self)
else:
self.weights = []
if self.rate_partition:
for part in sorted(list(set(self.rate_partition.values()))):
weight = 0
for f in self.features:
if self.rate_partition[f] == part:
weight += self.valuecounts[f]
self.weights.append(weight)
else:
for f in self.features:
self.weights.append(self.valuecounts[f])
def set_ascertained(self):
"""
Decide whether or not to do ascertainment correction for non-constant
data, unless the user has provided an explicit setting.
"""
if self.ascertained == None:
# For binary models, it is possible to retain constant data
# So we need to be more careful about automatically setting the value
# of ascertained.
if self.constant_feature:
# There's a constant feature in the data, so we definitely shouldn't
# do ascertainment correction for non-constant data
self.ascertained = False
elif self.constant_feature_removed:
# BEASTling personally removed a constant feature, so we definitely
# should do ascertainment correction if timing information is
# important
self.ascertained = not self.global_config.arbitrary_tree
else:
# We didn't see any constant features, but we also didn't remove
# any. So we don't quite know what to do...
# Most data sources are *probably* non-constant, so do ascertainment
# if the tree is calibrated, but inform the user.
# This duplicates above, but this condition is a default guess
# which we may change in future, whereas the above is a logical
# necessity, so it makes sense to separate these cases
self.ascertained = not self.global_config.arbitrary_tree
elif self.ascertained and not self.remove_constant_features:
raise ValueError("Incompatible settings for model '%s': ascertained=True and "
"remove_constant_features=False together constitute a model "
"misspecification.")
# If the data has only two values, we need to decide what kind to treat
# it as
if not self.recoded:
if type(self.binarised) == bool:
self.recoded = self.binarised
else:
# Data is binary but we haven't been told if it's "real binary"
# data or recoded multistate data. Assume it's real but if
# constant features have been retained then alert the user
# because this could cause problems.
self.recoded = False
if not self.ascertained:
log.info(
"Assuming that data source %s contains binary structural data "
"(e.g. absence/presence). If this is cognate set data which has been "
"pre-binarised, please set \"binarised=True\" in your config to enable "
"appropriate ascertainment correction for the recoding. If you don't do "
"this, estimates of branch lengths and clade ages may be "
"biased." % self.data_filename,
model=self)
def compute_feature_properties(self):
"""Compute various items of metadata for all remaining features.
This is very similar to the `compute_feature_probability` method of
BaseModel, but accounts for the possibility of having multiple values
present.
"""
self.valuecounts = {}
self.extracolumns = collections.defaultdict(int)
self.unique_values = {}
self.missing_ratios = {}
self.counts = {}
self.codemaps = {}
self.feature_value_partially_unknown = {}
for f in self.features:
# Compute various things
all_values = []
# Track whether any “unknown” values were encountered. The
# difference between “unknown” and “absent” values matters: Assume
# we have a feature with 3 possible values, A, B and C. Then "A"
# would be binarized as "100", "B" as "010", "AB" as "110", "-" as
# "000", "A-" as "100", but "?" as "???" and "A?" as "1??".
for l, values in self.data.items():
if f in values:
raw = values[f]
while "-" in raw:
raw.remove("-")
if "?" in raw:
raw = [x for x in raw if x!='?']
all_values.append(raw)
missing_data_ratio = 1 - len(all_values) / len(self.data)
non_q_values = [v for vs in all_values for v in vs]
assert None not in non_q_values
counts = {}
for v in non_q_values:
counts[v] = non_q_values.count(v)
unique_values = list(set(non_q_values))
# Sort unique_values carefully.
# Possibly all feature values are numeric strings, e.g. "1", "2", "3".
# If we sort these as strings then we get weird things like "10" < "2".
# This can actually matter for things like ordinal models.
# So convert these to ints first...
if all([v.isdigit() for v in unique_values]):
unique_values = list(map(int, unique_values))
unique_values.sort()
unique_values = list(map(str, unique_values))
# ...otherwise, just sort normally
else:
unique_values.sort()
self.unique_values[f] = unique_values
N = len(unique_values)
self.valuecounts[f] = N
self.missing_ratios[f] = missing_data_ratio
self.counts[f] = counts
self.codemaps[f] = self.build_codemap(unique_values)
def pattern_names(self, feature):
"""Content of the columns corresponding to this feature in the alignment.
This method is used for displaying helpful column names in ancestral
state reconstruction output. It gives column headers for actual value
columns as well as for dummy columns used in ascertainment correction,
if such columns exist.
"""
return (["{:}_dummy{:d}".format(feature, i) for i in range(len(self.extracolumns[feature]))] +
["{:}_{:}".format(feature, i) for i in self.unique_values[feature]])
def format_datapoint(self, feature, point):
if not self.recoded:
# This is "true binary" data, and doesn't need to be
# treated any differently to usual.
return BaseModel.format_datapoint(self, feature, point)
else:
# This is multistate data recoded into binary data.
if self.ascertained:
extra_columns = ["0", "1"]
else:
# If we are not ascertaining on non-constant data, we still
# need to add one "all zeros" column to account for the recoding
extra_columns = ["0"]
self.extracolumns[feature] = extra_columns
# Start with all zeros/question marks
if "?" in point:
point.remove("?")
absent = "?"
else:
absent = "0"
valuestring = extra_columns + [
absent for i in range(0, self.valuecounts[feature])]
# Set the appropriate data column to 1
for subpoint in point:
if subpoint == "?":
continue
valuestring[
len(extra_columns) +
self.unique_values[feature].index(subpoint)] = "1"
valuestring = "".join(valuestring)
return valuestring
def add_feature_data(self, distribution, index, feature, fname):
data = BaseModel.add_feature_data(self, distribution, index, feature, fname)
if self.recoded:
data.set("ascertained", "true")
data.set("excludefrom", "0")
if self.ascertained:
data.set("excludeto", "2")
else:
data.set("excludeto", "1")
def add_operators(self, run):
BaseModel.add_operators(self, run)
if self.gamma_categories > 0:
xml.operator(
run,
id="gammaShapeScaler.s:%s" % self.name,
spec="ScaleOperator",
parameter="@gammaShape.s:%s" % self.name,
scaleFactor="0.5",
weight="0.1")
def add_frequency_operators(self, run):
for name in self.parameter_identifiers():
xml.operator(
run,
id="frequency_sampler.s:%s" % name,
spec="DeltaExchangeOperator",
parameter="@freqs_param.s:%s" % name,
delta="0.01",
weight="1.0")
def add_param_logs(self, logger):
BaseModel.add_param_logs(self, logger)
if self.gamma_categories > 0:
xml.log(logger, idref="gammaShape.s:%s" % self.name)
def add_frequency_logs(self, logger):
for name in self.parameter_identifiers():
xml.log(logger, idref="freqs_param.s:%s" % name)
class BinaryModelWithShareParams(BinaryModel):
def __init__(self, model_config, global_config):
BinaryModel.__init__(self, model_config, global_config)
self.share_params = model_config.share_params
partial_reconstruct = self.reconstruct and (set(self.features) - set(self.reconstruct))
self.single_sitemodel = self.share_params and not (
self.rate_variation or self.feature_rates or
partial_reconstruct)
def build_freq_str(self, feature=None):
assert feature or self.share_params
if feature is None:
features = self.features
else:
features = [feature]
all_data = []
if self.binarised:
for f in features:
for lang in self.data:
for value in self.data[lang][f]:
if value == "?":
continue
dpoint, index = value, self.unique_values[f].index(value)
all_data.append(index)
else:
for f in features:
for lang in self.data:
all_data_points = set(self.data[lang].get(f, ["?"]))
if "?" in all_data_points:
valuestring = "".join(["?" for i in range(0,len(self.unique_values[f])+1)])
else:
valuestring = ["0" for i in range(0,len(self.unique_values[f])+1)]
for value in all_data_points - {"?"}:
valuestring[self.unique_values[f].index(value)+1] = "1"
all_data.extend(valuestring)
all_data = [d for d in all_data if d !="?"]
all_data = [int(d) for d in all_data]
zerf = 1.0*all_data.count(0) / len(all_data)
onef = 1.0*all_data.count(1) / len(all_data)
assert abs(1.0 - (zerf+onef)) < 1e-6
return "%.2f %.2f" % (zerf, onef)
def parameter_identifiers(self):
if self.share_params:
return [self.name]
else:
return ["{:s}:{:s}".format(self.name, f) for f in self.features]
def add_likelihood(self, likelihood):
if self.single_sitemodel:
self.add_single_sitemodel_likelihood(likelihood)
else:
BaseModel.add_likelihood(self, likelihood)
def add_single_sitemodel_likelihood(self, likelihood):
attribs = {"id": "DataLikelihood:%s" % self.name,
"spec": "TreeLikelihood",
"useAmbiguities": "true",
"branchRateModel": "@%s" % self.clock.branchrate_model_id,
"tree": "@Tree.t:beastlingTree",
}
distribution = xml.distribution(likelihood, attrib=attribs)
if not self.reconstruct:
pass
elif set(self.reconstruct) >= set(self.features):
# Use a different likelihood spec (also depending on whether
# the whole tree is reconstructed, or only some nodes)
            if self.treewide_reconstruction:
                distribution.attrib["spec"] = "ancestralstatetreelikelihood"
                self.treedata.append(attribs["id"])
                # All features share this single likelihood, so tag the
                # reconstruction with the model name rather than a per-feature name.
                distribution.attrib["tag"] = self.name
            else:
                distribution.attrib["spec"] = "lucl.beast.statereconstruction.ancestralstateslogger"
                distribution.attrib["value"] = " ".join(
                    name for f in self.features for name in self.pattern_names(f))
for label in self.reconstruct_at:
langs = self.config.language_group(label)
self.beastxml.add_taxon_set(distribution, label, langs)
self.metadata.append(attribs["id"])
distribution.attrib["useAmbiguities"] = "false"
else:
raise NotImplementedError(
"The model {:} is a binarised model with a single site "
"model, so it uses a global likelihood. Reconstructing "
"only a subset of features is not supported.".format(self.name))
self.add_sitemodel(distribution, None, None)
data = xml.data(
distribution,
id="filtered_data_%s" % self.name,
spec="FilteredAlignment",
data="@data_%s" % self.name,
filter="-")
if self.recoded:
data.set("ascertained", "true")
data.set("excludefrom", "0")
if self.ascertained:
data.set("excludeto", "2")
else:
data.set("excludeto", "1")
data.append(self.get_userdatatype(None, None))
def add_likelihood_loggers(self, logger):
        if not self.single_sitemodel:
            BaseModel.add_likelihood_loggers(self, logger)
| bsd-2-clause | -6,377,114,824,417,228,000 | 42.777494 | 197 | 0.550973 | false |
jpopelka/atomic-reactor | atomic_reactor/plugins/exit_sendmail.py | 1 | 11470 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from email.mime.text import MIMEText
import os
import smtplib
import socket
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from dockerfile_parse import DockerfileParser
import requests
from atomic_reactor.plugin import ExitPlugin, PluginFailedException
from atomic_reactor.plugins.pre_check_and_set_rebuild import is_rebuild
from atomic_reactor.source import GitSource
class SendMailPlugin(ExitPlugin):
"""This plugins sends notifications about build results.
Example configuration (see arguments for init for detailed explanation):
"exit_plugins": [{
"name": "sendmail",
"args": {
"send_on": ["auto_canceled", "auto_fail"],
"url": "https://openshift-instance.com",
"pdc_url": "https://pdc-instance.com",
# pdc_secret_path is filled in automatically by osbs-client
"pdc_secret_path": "/path/to/file/with/pdc/token",
"smtp_uri": "smtp-server.com",
"from_address": "[email protected]",
"error_addresses": ["[email protected]"],
# optional arguments follow
"submitter": "John Smith <[email protected]>",
"pdc_verify_cert": true,
"pdc_component_df_label": "BZComponent",
"pdc_contact_role": "Devel_Owner"
}
}]
"""
key = "sendmail"
# symbolic constants for states
MANUAL_SUCCESS = 'manual_success'
MANUAL_FAIL = 'manual_fail'
AUTO_SUCCESS = 'auto_success'
AUTO_FAIL = 'auto_fail'
AUTO_CANCELED = 'auto_canceled'
allowed_states = set([MANUAL_SUCCESS, MANUAL_FAIL, AUTO_SUCCESS, AUTO_FAIL, AUTO_CANCELED])
PDC_TOKEN_FILE = 'pdc.token'
PDC_CONTACT_ROLE = 'Devel_Owner'
def __init__(self, tasker, workflow, send_on=None, url=None, submitter='unknown', pdc_url=None,
pdc_verify_cert=True, pdc_component_df_label="BZComponent", pdc_secret_path=None,
pdc_contact_role=None, smtp_uri=None, from_address=None,
error_addresses=None):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param send_on: list of build states when a notification should be sent
:param url: URL to OSv3 instance where the build logs are stored
:param submitter: name of user who submitted a build (plain string)
:param pdc_url: URL of PDC to query for contact information
:param pdc_verify_cert: whether or not to verify SSL cert of PDC (defaults to True)
:param pdc_component_df_label: name of Dockerfile label to use as PDC global_component
:param pdc_secret_path: path to pdc.token file; $SOURCE_SECRET_PATH otherwise
:param pdc_contact_role: name of PDC role to contact
:param smtp_uri: URL of SMTP server to use to send the message (e.g. "foo.com:25")
:param from_address: the "From" of the notification email
:param error_addresses: list of email addresses where to send an email if there's an error
(e.g. if we can't find out who to notify about the failed build)
"""
super(SendMailPlugin, self).__init__(tasker, workflow)
self.send_on = send_on
self.url = url
self.submitter = submitter
self.pdc_url = pdc_url
self.pdc_verify_cert = pdc_verify_cert
self.pdc_component_df_label = pdc_component_df_label
self.pdc_secret_path = pdc_secret_path
self.pdc_contact_role = pdc_contact_role or self.PDC_CONTACT_ROLE
self.smtp_uri = smtp_uri
self.from_address = from_address
self.error_addresses = error_addresses
def _should_send(self, rebuild, success, canceled):
"""Return True if any state in `self.send_on` meets given conditions, thus meaning
that a notification mail should be sent.
"""
should_send = False
should_send_mapping = {
self.MANUAL_SUCCESS: not rebuild and success,
self.MANUAL_FAIL: not rebuild and not success,
self.AUTO_SUCCESS: rebuild and success,
self.AUTO_FAIL: rebuild and not success,
self.AUTO_CANCELED: rebuild and canceled
}
for state in self.send_on:
should_send |= should_send_mapping[state]
return should_send
def _render_mail(self, rebuild, success, canceled):
"""Render and return subject and body of the mail to send."""
subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'
body_template = '\n'.join([
'Image: %(image)s',
'Status: %(endstate)s',
'Submitted by: %(user)s',
'Logs: %(logs)s',
])
endstate = None
if canceled:
endstate = 'canceled'
else:
endstate = 'successful' if success else 'failed'
url = None
if self.url and self.workflow.openshift_build_selflink:
url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')
formatting_dict = {
'image': self.workflow.image,
'endstate': endstate,
'user': '<autorebuild>' if rebuild else self.submitter,
'logs': url
}
return (subject_template % formatting_dict, body_template % formatting_dict)
def _get_pdc_token(self):
# we want to allow pdc_secret_path to be None in __init__ - I'm assuming that in future
# we'll want different sources of contact info, so we only want to raise when
# the plugin actually tries to authenticate against PDC and doesn't have pdc_secret_path
if self.pdc_secret_path is None:
raise PluginFailedException('Getting PDC token, but pdc_secret_path is unspecified')
token_file = os.path.join(self.pdc_secret_path, self.PDC_TOKEN_FILE)
self.log.debug('getting PDC token from file %s', token_file)
with open(token_file, 'r') as f:
return f.read().strip()
def _get_component_label(self):
"""Get value of Dockerfile label that is to be used as `global_component` to query
PDC release-components API endpoint.
"""
labels = DockerfileParser(self.workflow.builder.df_path).labels
if self.pdc_component_df_label not in labels:
raise PluginFailedException('No %s label in Dockerfile, can\'t get PDC component',
self.pdc_component_df_label)
return labels[self.pdc_component_df_label]
def _get_receivers_list(self):
"""Return list of receivers of the notification.
:raises RuntimeError: if PDC can't be contacted or doesn't provide sufficient data
:raises PluginFailedException: if there's a critical error while getting PDC data
"""
# TODO: document what this plugin expects to be in Dockerfile/where it gets info from
global_component = self._get_component_label()
# this relies on bump_release plugin configuring source.git_commit to actually be
# branch name, not a commit
if not isinstance(self.workflow.source, GitSource):
raise PluginFailedException('Source is not of type "GitSource", panic!')
git_branch = self.workflow.source.git_commit
try:
r = requests.get(urljoin(self.pdc_url, 'rest_api/v1/release-component-contacts/'),
headers={'Authorization': 'Token %s' % self._get_pdc_token()},
params={'global_component': global_component,
'dist_git_branch': git_branch,
'role': self.pdc_contact_role},
verify=self.pdc_verify_cert)
except requests.RequestException as e:
self.log.error('failed to connect to PDC: %s', str(e))
raise RuntimeError(e)
if r.status_code != 200:
self.log.error('PDC returned status code %s, full response: %s',
r.status_code, r.text)
raise RuntimeError('PDC returned non-200 status code (%s), see referenced build log' %
r.status_code)
contacts = r.json()
if contacts['count'] == 0:
self.log.error('no %s role for the component', self.pdc_contact_role)
raise RuntimeError('no %s role for the component' % self.pdc_contact_role)
send_to = []
for contact in contacts['results']:
send_to.append(contact['contact']['email'])
return send_to
def _send_mail(self, receivers_list, subject, body):
"""Actually sends the mail with `subject` and `body` to all members of `receivers_list`."""
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = self.from_address
msg['To'] = ', '.join(receivers_list)
s = None
try:
s = smtplib.SMTP(self.smtp_uri)
s.sendmail(self.from_address, receivers_list, msg.as_string())
except (socket.gaierror, smtplib.SMTPException) as e:
raise PluginFailedException('Error communicating with SMTP server: %s' % str(e))
finally:
if s is not None:
s.quit()
def run(self):
# verify that given states are subset of allowed states
unknown_states = set(self.send_on) - self.allowed_states
if len(unknown_states) > 0:
raise PluginFailedException('Unknown state(s) "%s" for sendmail plugin' %
'", "'.join(sorted(unknown_states)))
rebuild = is_rebuild(self.workflow)
success = not self.workflow.build_failed
canceled = self.workflow.autorebuild_canceled
self.log.info('checking conditions for sending notification ...')
if self._should_send(rebuild, success, canceled):
self.log.info('notification about build result will be sent')
subject, body = self._render_mail(rebuild, success, canceled)
try:
self.log.debug('getting list of receivers for this component ...')
receivers = self._get_receivers_list()
except RuntimeError as e:
self.log.error('couldn\'t get list of receivers, sending error message ...')
# TODO: maybe improve the error message/subject
body = '\n'.join([
'Failed to get contact for %s, error: %s' % (str(self.workflow.image), str(e)),
'Since your address is in "error_addresses", this email was sent to you to '
'take action on this.',
'Wanted to send following mail:',
'',
body
])
receivers = self.error_addresses
self.log.info('sending notification to %s ...', receivers)
self._send_mail(receivers, subject, body)
else:
self.log.info('conditions for sending notification not met, doing nothing')
| bsd-3-clause | 1,448,351,702,517,134,300 | 43.285714 | 99 | 0.599128 | false |
DevynCJohnson/Pybooster | pylib/multimedia.py | 1 | 14498 | #!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Library for multimedia manipulation.
@file multimedia.py
@package pybooster.multimedia
@version 2019.07.14
@author Devyn Collier Johnson <[email protected]>
@copyright LGPLv3
@section HELPFUL DOCUMENTATION
FFmpeg
- FFmpeg Codecs: https://ffmpeg.org/ffmpeg-codecs.html
- FFmpeg Filters: https://ffmpeg.org/ffmpeg-filters.html
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from array import array
from multiprocessing import cpu_count
import shlex
from subprocess import PIPE, Popen # nosec
from tempfile import NamedTemporaryFile
import wave
from pybooster.fs import doesfileexist, getfilename
from pybooster.iterables import mergeoddeven
from pybooster.system import is_program_aval
PYGAME_IMPORTED: bool = False
try:
from pygame.mixer import init, music
PYGAME_IMPORTED = True
except ImportError:
pass
__all__: list = [
# GLOBAL CONSTANTS #
r'FFMPEG',
# AUDIO #
r'merge2rawwav',
r'openwavfile',
r'openmp3file',
r'writewavfile',
r'playmusic',
r'audioimg2mp4',
# AUDIO CONVERSIONS #
r'to_aac',
r'to_ac3',
r'to_ac3_fixed',
r'to_flac',
r'to_mp3',
r'to_ogg',
r'to_opus',
r'to_wav_tmp',
r'to_wav',
r'mp3_to_wav',
r'wav_to_mp3'
]
# GLOBAL CONSTANTS #
FFMPEG: str = r'ffmpeg -y -hide_banner -loglevel panic -sn -vn'
# AUDIO #
def merge2rawwav(_wav_data: dict) -> bytes:
"""Merge the split WAV channels back together and convert the data to the original raw WAV format."""
if _wav_data[r'num_channels'] == 2:
return mergeoddeven(_wav_data[r'left_audio'], _wav_data[r'right_audio']).tobytes()
return _wav_data[r'data'].tobytes()
def openwavfile(_filename: str) -> dict:
"""Get the contents of the specified WAV file and return the data as a list of integers in a dictionary describing the data."""
_wav_data: list = []
with wave.open(_filename, mode=r'rb') as _file:
_wav_data.append(_file.readframes(_file.getnframes()))
_out: dict = {
r'num_frames': _file.getnframes(),
r'frame_rate': _file.getframerate(),
r'num_channels': _file.getnchannels(),
r'sample_width': _file.getsampwidth()
}
if _out[r'sample_width'] == 1: # 8-bit
_out[r'data'] = array(r'b', _wav_data[0])
elif _out[r'sample_width'] == 2: # 16-bit
_out[r'data'] = array(r'h', _wav_data[0])
elif _out[r'sample_width'] == 4: # 32-bit
_out[r'data'] = array(r'l', _wav_data[0])
if _out[r'num_channels'] == 2:
_out[r'left_audio'] = _out[r'data'][0::2]
_out[r'right_audio'] = _out[r'data'][1::2]
return _out
def openmp3file(_filename: str) -> dict:
"""Get the contents of the specified MP3 file and return the data as a list of integers in a dictionary describing the data."""
_wav_data: list = []
_tmpfile = NamedTemporaryFile()
if not to_wav_tmp(_filename, _tmpfile.name):
raise Exception(r'Failed to convert MP3 file to WAV!')
with wave.open(_tmpfile.name, mode=r'rb') as _file:
_wav_data.append(_file.readframes(_file.getnframes()))
_out: dict = {
r'num_frames': _file.getnframes(),
r'frame_rate': _file.getframerate(),
r'num_channels': _file.getnchannels(),
r'sample_width': _file.getsampwidth()
}
if _out[r'sample_width'] == 1: # 8-bit
_out[r'data'] = array(r'b', _wav_data[0])
elif _out[r'sample_width'] == 2: # 16-bit
_out[r'data'] = array(r'h', _wav_data[0])
elif _out[r'sample_width'] == 4: # 32-bit
_out[r'data'] = array(r'l', _wav_data[0])
if _out[r'num_channels'] == 2:
_out[r'left_audio'] = _out[r'data'][0::2]
_out[r'right_audio'] = _out[r'data'][1::2]
return _out
def writewavfile(_wav_data: dict, _filename: str) -> None:
"""Write a WAV file using data in the given WAV data dictionary."""
with wave.open(_filename, mode=r'wb') as _file:
_file.setparams((_wav_data[r'num_channels'], _wav_data[r'sample_width'], _wav_data[r'frame_rate'], _wav_data[r'num_frames'], r'NONE', r'not compressed')) # pylint: disable=E1101
_file.writeframes(merge2rawwav(_wav_data)) # pylint: disable=E1101
def playmusic(_filename: str) -> None: # noqa: R701
"""Play an MP3, WAV, or other audio files."""
if PYGAME_IMPORTED:
init()
music.load(_filename)
music.play()
while music.get_busy() is True:
continue
elif is_program_aval(r'ffplay'):
_process = Popen(shlex.split(r'ffplay -hide_banner -loglevel panic -sn -vn -nodisp ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif is_program_aval(r'play'):
_process = Popen(shlex.split(r'play -q -V1 ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
    elif _filename.endswith(r'.mp3') and is_program_aval(r'mpg321'):
_process = Popen(shlex.split(r'mpg321 --quiet ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif _filename.endswith(r'.mp3') and is_program_aval(r'mpg123'):
_process = Popen(shlex.split(r'mpg123 --quiet ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif _filename.endswith(r'.ogg') and is_program_aval(r'ogg123'):
_process = Popen(shlex.split(r'ogg123 --quiet ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
def audioimg2mp4(_audio_filename: str, _img_filename: str, sample_rate: int = 44100) -> bool:
"""Create an MP4 video given an audio file & an image file; Return True if successful."""
if doesfileexist(_img_filename) and doesfileexist(_audio_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
core_count = r'-threads ' + str(cpu_count())
hw_params = r'-hwaccel cuvid ' + core_count if is_program_aval(r'nvidia-smi') else r'-hwaccel vaapi ' + core_count
_process = Popen(shlex.split(r'ffmpeg -y -hide_banner -loglevel panic ' + hw_params + r' -thread_queue_size 4096 -probesize 20M -analyzeduration 20M -i ' + _img_filename + r' -i ' + _audio_filename + r' -c:v libx264 -crf 15 -tune stillimage -vf scale=2560:1440 -c:a libmp3lame -b:a 320000 -ar ' + str(sample_rate) + r' -compression_level 0 ' + getfilename(_audio_filename) + r'_merged.mp4'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
# AUDIO CONVERSIONS #
def to_aac(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an AAC file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a libfaac -ar ' + str(sample_rate) + r' -f aac ' + getfilename(_filename) + r'.aac'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_ac3(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an AC3 file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a ac3 -ar ' + str(sample_rate) + r' -f ac3 ' + getfilename(_filename) + r'.ac3'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_ac3_fixed(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an AC3 (Fixed) file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a ac3_fixed -ar ' + str(sample_rate) + r' -f ac3 ' + getfilename(_filename) + r'.ac3'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_flac(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to a Flac file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a flac -ar ' + str(sample_rate) + r' -compression_level 12 -f flac ' + getfilename(_filename) + r'.flac'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_mp3(_filename: str, bitrate: int = 320000, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an MP3 file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a libmp3lame -b:a ' + str(bitrate) + r' -ar ' + str(sample_rate) + r' -compression_level 0 -f mp3 ' + getfilename(_filename) + r'.mp3'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_ogg(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an OGG file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a libvorbis -ar ' + str(sample_rate) + r' -f ogg ' + getfilename(_filename) + r'.ogg'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_opus(_filename: str) -> bool:
"""Convert an audio file to an OPUS file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a libopus -compression_level 10 -f opus ' + getfilename(_filename) + r'.opus'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_wav_tmp(_filename: str, _tmpname: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to a WAV file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a pcm_s16le -ar ' + str(sample_rate) + r' -f wav ' + _tmpname), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_wav(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to a WAV file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a pcm_s16le -ar ' + str(sample_rate) + r' -f wav ' + getfilename(_filename) + r'.wav'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def mp3_to_wav(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an MP3 file to a WAV file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
return to_wav(_filename, sample_rate)
        if is_program_aval(r'mpg321'):
_process = Popen(shlex.split(r'mpg321 --quiet --stereo --wav ' + _filename + r' ' + _filename.replace(r'.mp3', r'.wav')), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif is_program_aval(r'mpg123'):
_process = Popen(shlex.split(r'mpg123 --quiet --stereo --wav ' + _filename + r' ' + _filename.replace(r'.mp3', r'.wav')), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def wav_to_mp3(_filename: str, bitrate: int = 320000, sample_rate: int = 44100) -> bool:
"""Convert a WAV file to an MP3 file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
return to_mp3(_filename, bitrate, sample_rate)
return False
| lgpl-3.0 | -5,862,337,393,570,860,000 | 39.272222 | 429 | 0.618016 | false |
gbenson/i8c | tests/test_compiler_driver.py | 1 | 4941 | # -*- coding: utf-8 -*-
# Copyright (C) 2015-16 Red Hat, Inc.
# This file is part of the Infinity Note Compiler.
#
# The Infinity Note Compiler is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# The Infinity Note Compiler is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infinity Note Compiler. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from tests import TestCase
from i8c.compiler import I8CError
from i8c.compiler.driver import main
import os
import subprocess
import sys
import tempfile
SOURCE = """\
define test::func
return
"""
class TestCompilerDriver(TestCase):
"""Test i8c.compiler.driver.main.
This testcase should be the bare minumum to exercise the function
i8c.compiler.driver.main and its helper functions. Command line
processing tests should be in test_commandline_processor.py, and
tests exercising the compiler generally (i8c.compiler.compile)
should be in their own files.
"""
def setUp(self):
# Set up a working directory
self.workdir = tempfile.mkdtemp()
self.filebase = os.path.join(self.workdir, "test")
self.infile = self.filebase + ".i8"
with open(self.infile, "w") as fp:
fp.write(SOURCE)
# Pipe stderr to a file
tmpfile = os.path.join(self.workdir, "stderr")
self.stderr_fd = os.open(tmpfile,
os.O_RDWR | os.O_CREAT | os.O_EXCL,
0o600)
sys.stderr.flush()
self.saved_stderr_fd = os.dup(2)
os.dup2(self.stderr_fd, 2)
def tearDown(self):
# Restore stderr
sys.stderr.flush()
os.dup2(self.saved_stderr_fd, 2)
os.close(self.saved_stderr_fd)
os.close(self.stderr_fd)
# Delete the working directory
os.chmod(self.workdir, 0o700)
subprocess.call(("rm", "-rf", self.workdir))
# Test all specifiable permutations of (with_cpp,with_i8c,with_asm)
def __run_permtest(self, args, outext):
self.outfile = self.filebase + outext
if "-E" in args:
args.extend(("-o", self.outfile))
args.append(self.infile)
self.assertFalse(os.path.exists(self.outfile))
status = main(args)
self.assertIs(status, None)
self.assertTrue(os.path.isfile(self.outfile))
junk = os.path.join(self.workdir, "-.o")
self.assertFalse(os.path.exists(junk))
def test_do_nothing(self):
"""Check that -E -fpreprocessed is rejected."""
self.assertRaises(I8CError, main, ["-E", "-fpreprocessed"])
def test_pp_to_asm(self):
"""Check that preprocessed source to assembly works."""
self.__run_permtest(["-S", "-fpreprocessed"], ".S")
def test_pp_to_obj(self):
"""Check that preprocessed source to object code works."""
self.__run_permtest(["-fpreprocessed", "-c"], ".o")
def test_i8_to_pp(self):
"""Check that i8 source to preprocessed source works."""
self.__run_permtest(["-E"], ".i8p")
def test_i8_to_asm(self):
"""Check that i8 source to assembly works."""
self.__run_permtest(["-S"], ".S")
def test_i8_to_obj(self):
"""Check that i8 source to object code works."""
self.__run_permtest(["-c"], ".o")
# Test that GCC errors are handled correctly
def __run_failtest(self):
status = main(["-c", self.infile])
self.assertIsNot(status, None)
size = os.lseek(self.stderr_fd, 0, 1)
os.lseek(self.stderr_fd, 0, 0)
output = os.read(self.stderr_fd, size).decode("utf-8")
self.assertGreaterEqual(output.find("error:"), 0)
def test_cpp_failure(self):
"""Check that preprocessor errors are handled correctly."""
os.unlink(self.infile)
self.__run_failtest()
def test_asm_failure(self):
"""Check that assembler errors are handled correctly."""
os.chmod(self.workdir, 0o500)
self.__run_failtest()
# Test that multiple input files with no output file is caught
def test_multi_input_no_output(self):
"""Check that unguessable output filenames are handled."""
infile2 = os.path.join(self.workdir, "test2.i8")
open(infile2, "w")
self.assertRaises(I8CError,
self.__run_permtest, ["-c", infile2], ".o")
| lgpl-2.1 | -4,894,711,709,997,495,000 | 34.804348 | 71 | 0.632463 | false |
sequana/sequana | sequana/genbank.py | 1 | 5965 | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
# Dimitri Desvillechabrol <[email protected]>,
# <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import re
from sequana.annotations import Annotation
from sequana.fasta import FastA
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["GenBank"]
# TODO: we should factorise gff and genbank in a parent class (Annotation)
class GenBank(Annotation):
"""
::
        gg = GenBank("input.gbk")
gg.get_types()
"""
def __init__(self, filename):
super(GenBank, self).__init__(filename)
def get_types(self):
records = self.genbank_features_parser()
_types = set()
for contig in records.keys():
for feature in records[contig]:
_type = feature['type']
_types.add(_type)
return sorted(_types)
def extract_fasta(self, fastafile, features=['rRNA']):
types = self.get_types()
for feature in features:
if feature not in types:
raise ValueError("{} not found".format(feature))
# fasta may have several contig/chromosome names
# the gene bank should be compatible !!
fasta = FastA(fastafile)
contig_names = fasta.get_lengths_as_dict()
# most of the times, the version is not in the gbk
contig_names = [x.split(".")[0] for x in contig_names]
# then we read the features from the genbank
records = self.genbank_features_parser()
contig_names_gbk = list(records.keys())
# FIXME FastA is not very efficient for eukaryotes but is enough for now
output = ""
for name in records.keys():
if name not in contig_names:
logger.warning("{} contig from genbank not found in fasta".format(name))
continue
index = contig_names.index(name)
sequence = fasta.sequences[index]
for item in records[name]:
if item['type'] in features:
start, end = item['gene_start'], item['gene_end']
try:
info = item['product']
output += ">{}_{}_{}_{} {}\n".format(name, item['type'],
start,end, info)
except:
output += ">{}_{}_{}_{} {}\n".format(name, item['type'], start, end)
output+= "{}\n".format(sequence[start:end])
return output
def genbank_features_parser(self):
""" Return dictionary with features contains inside a genbank file.
:param str input_filename: genbank formated file
"""
new_feature = {}
records = {}
feature_list = []
feature_field = False
with open(self.filename, "r") as fp:
for line in fp:
# pass header and sequence fields
if not feature_field:
# get contig/chrom name
if line.startswith("LOCUS"):
name = line.split()[1]
elif line.startswith("FEATURE"):
feature_field = True
else:
# if feature field is finished
if line.startswith("ORIGIN"):
feature_field = False
records[name] = feature_list
feature_list = []
new_feature = []
continue
# if there are a word in qualifier indent (feature type)
# maybe we need to infer the size of indentation ???
if line[0:20].split():
if new_feature:
feature_list.append(new_feature)
split_line = line.split()
t = split_line[0]
# Handle :
#complement(order(1449596..1449640,1449647..1450684,
#1450695..1450700))
positions = split_line[1]
if positions[0].isalpha():
while not line[:-1].endswith(")"):
line = next(fp)
positions += line
pos = [int(n) for n in re.findall(r"\d+", positions)]
# Handle complement(join(3773333..3774355,3774357..3774431))
start = pos[0]
end = pos[-1]
strand = "-" if split_line[1].startswith("c") else "+"
new_feature = {"type": t, "gene_start": start,
"gene_end": end, "strand": strand}
# recover qualifier bound with feature
else:
quali_line = line.strip().replace('"', '')
if quali_line.startswith("/") and "=" in quali_line:
qualifier = quali_line.split("=")
key = qualifier[0][1:]
new_feature[key] = qualifier[1]
else:
if key == "translation":
new_feature[key] += quali_line
else:
new_feature[key] += " " + quali_line
return records
| bsd-3-clause | -5,432,652,271,706,455,000 | 36.753165 | 92 | 0.474099 | false |
ArnesSI/collectd-python-shower | shower/data/template.py | 1 | 6506 | # -*- coding: utf-8 -*-
########################################################################
#
# MIT License
#
# Copyright (c) 2017 Matej Vadnjal <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
########################################################################
import os
import re
from copy import deepcopy
from ..exceptions import ShowerConfigException
from .data import Data
class DataTextFSM(Data):
def __init__(self, conf):
self.template = None
self.searchdirs = [os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, 'templates'))]
self.template_fullpath = '' # full absolute path to the textfsm template
self.command = None
self.typeoverride = None
self._textfsm = None
super(DataTextFSM, self).__init__(conf)
self._get_template_path()
self._read_command()
self._textfsm_init()
def _from_conf(self, conf):
super(DataTextFSM, self)._from_conf(conf)
# get TemplateDir from parent conf
for node in conf.parent.children:
key = node.key.lower()
if key == 'templatepath':
self.searchdirs.extend(node.values)
for node in conf.children:
key = node.key.lower()
if key in ['template', 'command', 'typeoverride']:
setattr(self, key, str(node.values[0]))
elif key == 'templatepath':
self.searchdirs.extend(node.values)
def _validate(self):
super(DataTextFSM, self)._validate()
if not self.template:
raise ShowerConfigException('Missing Template in Data "{}" section'.format(self.name))
def _get_template_path(self):
if os.path.isabs(self.template):
self.template_fullpath = self.template
else:
for searchdir in reversed(self.searchdirs):
test_fullpath = os.path.join(searchdir, self.template)
if os.path.isfile(test_fullpath):
self.template_fullpath = test_fullpath
break
if not os.path.isfile(self.template_fullpath):
raise ShowerConfigException('Template does not exist or is not readable "{}" in Data "{}"'.format(self.template_fullpath, self.name))
def _read_command(self):
if self.command:
# don't search if set in collectd config file
return
with open(self.template_fullpath, 'r') as fh:
for line in fh:
if line.startswith('Value ') and not self.command:
raise ShowerConfigException('Command not found in TextFSM template "{}" in Data "{}"'.format(self.template_fullpath, self.name))
m = re.search(r'^#\s*[Cc]ommand:\s+(.+?)\s*$', line)
if m:
self.command = m.group(1)
if not self.command:
raise ShowerConfigException('Command not found in TextFSM template "{}" in Data "{}"'.format(self.template_fullpath, self.name))
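    # Illustrative TextFSM template header this parser expects (an assumed example,
    # not shipped with this module):
    #   # Command: show interface counters
    #   Value INTERFACE (\S+)
    #   Value RX_PACKETS (\d+)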
def _textfsm_init(self):
try:
import textfsm
except ImportError as e:
self.log('error', 'You\'ll need to install textfsm Python module to use textfsm style parsing.')
raise
try:
self._textfsm = textfsm.TextFSM(open(self.template_fullpath))
except textfsm.TextFSMTemplateError as e:
self.log('error', 'TextFSMTemplateError "{}" while parsing TextFSM template "{}" in Data "{}"'.format(e, self.template_fullpath, self.name))
raise
if self.typeoverride:
            # to signal to the dispatch method that it needs to set type_instance
self.table = True
def parse(self, output):
this_tfsm = deepcopy(self._textfsm)
this_tfsm.ParseText(output)
return self._textfsm_to_dict(this_tfsm)
def _textfsm_to_dict(self, tfsm):
results = {}
self.log('info', 'TextFSM result: {}'.format(repr(tfsm._result)))
# Convert TextFSM object to list of dictionaries (by Kirk Byers)
temp_dict = None
for row in tfsm._result:
typeinstance_values = [None] * len(self.typeinstance)
temp_dict = {}
for index, element in enumerate(row):
header = tfsm.header[index].lower()
if self.table and self.typeinstance and header in self.typeinstance:
typeinstance_values[self.typeinstance.index(header)] = str(element)
elif self.types and header not in self.types:
# this is a field we do not want
continue
else:
temp_dict[header] = element
assert None not in typeinstance_values
results['_'.join(typeinstance_values)] = temp_dict
if not self.table and temp_dict:
# if TypeInstance not in configuration, assume only one record will
# be returned -> place it under key '0'
results['0'] = temp_dict
if self.typeoverride:
# if user set TypeOverride in the config, we can only support one
# result from textfsm.
# We actually change type into type_instance and set type to value
# from config.
results = {}
for typ_inst, val in temp_dict.items():
results[typ_inst] = {self.typeoverride: val}
self.log('info', repr(results))
return results
| mit | 5,864,907,224,726,007,000 | 44.180556 | 152 | 0.604211 | false |
jplu/fasttext-for-linking | fasttext_app.py | 1 | 2105 | import fasttext
import gensim
import numpy
import flask
import spacy
import argparse
import os
import operator
import collections
app = flask.Flask(__name__)
def file_exists(x):
if not os.path.isfile(x):
import argparse
raise argparse.ArgumentTypeError("{0} is not a file".format(x))
return x
def init(args):
global model
global nlp
model = fasttext.load_model(args.model)
nlp = spacy.load(args.language)
@app.route('/fasttext', methods=['POST'])
def fasttext_sim():
if not flask.request.json or not 'entities' in flask.request.json or not 'text' in flask.request.json or not 'mention' in flask.request.json:
flask.abort(400)
scores = {}
clean_text = [token.orth_ for token in nlp(flask.request.json['text']) if not (token.is_punct or token.is_stop or token.is_space or token.orth_ == flask.request.json['mention'])]
for entity in flask.request.json['entities']:
clean_entity = [token.orth_ for token in nlp(entity) if not (token.is_punct or token.is_stop or token.is_space)]
v1 = model["_".join(clean_entity).lower()]
v2 = [model[word.lower()] for word in clean_text]
if v1 and v2:
scores[entity] = numpy.dot(gensim.matutils.unitvec(numpy.array(v1).mean(axis=0)), gensim.matutils.unitvec(numpy.array(v2).mean(axis=0)))
else:
scores[entity] = 0.0
sorted_scores = collections.OrderedDict(sorted(scores.items(), key=operator.itemgetter(1), reverse=True))
return flask.jsonify(sorted_scores), 200
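# Illustrative request (assumes the app was started with "-p 5000"; the payload keys
# match the checks above):
#   curl -X POST http://localhost:5000/fasttext -H 'Content-Type: application/json' \
#        -d '{"text": "Paris is the capital of France", "mention": "Paris",
#             "entities": ["Paris", "Paris Saint-Germain F.C."]}'
# The response is a JSON object mapping each candidate entity to its similarity score,
# sorted in descending order.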
def main():
parser = argparse.ArgumentParser(description="Webapp for entity linking using fastText in a given language", prog="fasttext_app")
parser.add_argument("-l", "--language", required=True, help="Set the language")
parser.add_argument("-m", "--model", required=True, type=file_exists, help="Set the fastText model")
parser.add_argument("-p", "--port", required=True, type=int, help="Set the port")
parser.add_argument('--version', action='version', version='%(prog)s 1.0.0')
args = parser.parse_args()
init(args)
app.config["JSON_SORT_KEYS"] = False
app.run(host='0.0.0.0', port=args.port)
if __name__ == '__main__':
main()
| apache-2.0 | 5,421,315,234,155,714,000 | 33.508197 | 179 | 0.703563 | false |
bwesterb/pol | src/blockcipher.py | 1 | 2756 | """ Implementation of the block ciphers """
import logging
import pol.serialization
import Crypto.Cipher.AES
import Crypto.Util.Counter
l = logging.getLogger(__name__)
class BlockCipherParameterError(ValueError):
pass
class BaseStream(object):
def encrypt(self, s):
raise NotImplementedError
def decrypt(self, s):
raise NotImplementedError
class BlockCipher(object):
""" Encrypts blocks with a fixed key. """
def __init__(self, params):
""" Initialize the BlockCipher with the given parameters.
NOTE use BlockCipher.setup """
self.params = params
@staticmethod
def setup(params=None):
""" Set-up the blockcipher given by `params`. """
if params is None:
params = {'type': 'aes',
'bits': 256 }
if ('type' not in params or not isinstance(params['type'], basestring)
or params['type'] not in TYPE_MAP):
raise BlockCipherParameterError("Invalid `type' attribute")
return TYPE_MAP[params['type']](params)
@property
def blocksize(self):
""" blocksize in bytes """
raise NotImplementedError
@property
def keysize(self):
""" size of key in bytes """
raise NotImplementedError
def new_stream(self, key, iv, offset=0):
raise NotImplementedError
class _AESStream(BaseStream):
def __init__(self, cipher):
self.cipher = cipher
def encrypt(self, s):
return self.cipher.encrypt(s)
def decrypt(self, s):
return self.cipher.decrypt(s)
class AESBlockCipher(BlockCipher):
""" AES is the default blockcipher """
def __init__(self, params):
super(AESBlockCipher, self).__init__(params)
if not 'bits' in params or params['bits'] not in (256, ):
            raise BlockCipherParameterError("Invalid param `bits'")
self.bits = params['bits']
def new_stream(self, key, iv, offset=0):
if offset % 16 != 0:
raise ValueError("`offset' should be a multiple of 16")
if len(key) * 8 != self.bits:
raise ValueError("`key' should be %s long" % (self.bits/8))
if len(iv) != 16:
raise ValueError("`iv' should be 16 bytes long")
ctr = Crypto.Util.Counter.new(128,
initial_value=pol.serialization.string_to_number(iv)
+ offset/16)
cipher = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CTR,
counter=ctr)
return _AESStream(cipher)
@property
def blocksize(self):
return 16
@property
def keysize(self):
return self.bits / 8
TYPE_MAP = {'aes': AESBlockCipher}
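
if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): round-trip
    # a short message through the default AES-256-CTR block cipher.  The key
    # and IV here are throwaway random values; real callers would derive them
    # from the surrounding application (e.g. via a KDF).
    import os
    cipher = BlockCipher.setup({'type': 'aes', 'bits': 256})
    key = os.urandom(cipher.keysize)
    iv = os.urandom(cipher.blocksize)
    ciphertext = cipher.new_stream(key, iv).encrypt('sixteen byte msg')
    assert cipher.new_stream(key, iv).decrypt(ciphertext) == 'sixteen byte msg'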
| gpl-3.0 | 7,931,594,299,527,833,000 | 28.956522 | 78 | 0.592163 | false |
ZuoCaiSong/Python | Base/IO.py | 1 | 3924 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#1: Open the file
f = open('/Users/Encore/Desktop/Python3.0/Python/io.txt', 'r')
#2: If the file exists, call the read method
print f.read()
# 3: The last step is to call close() to close the file. A file must be closed
# once you are done with it: the file object holds operating-system resources,
# and the OS limits how many files can be open at the same time:
f.close()
# If the file does not exist, open() raises an IOError with an error code and
# a detailed message telling you that the file does not exist:
#f = open('/Users/Encore/Desktop/不存在的文件.rtf', 'r')
'''
Since reading and writing a file can both raise IOError, once an error occurs
the later f.close() call is never reached. So, to make sure the file is closed
correctly whether or not an error occurs, we can use try ... finally:
'''
try:
f = open('/Users/Encore/Desktop/Python3.0/Python/io.txt', 'r')
print f.read()
finally:
if f:
        f.close()
'''
Writing that every time is tedious, so Python provides the with statement to
call close() for us automatically:
it is equivalent to the try ... finally above, but the code is more concise
and there is no need to call f.close() explicitly.
Calling read() reads the whole file into memory at once; if the file is 10 GB,
memory blows up. To be safe, call read(size) repeatedly, reading at most size
bytes each time. readline() reads one line at a time, and readlines() reads
everything at once and returns a list of lines, so pick whichever fits:
if the file is small, a single read() is the most convenient; if you cannot be
sure of the file size, repeated read(size) calls are safer; for a config file,
readlines() is the most convenient:
'''
# with open('/Users/Encore/Desktop/Python3.0/Python/io2.txt', 'r') as f1:
# print f1.read()
f1 = open('/Users/Encore/Desktop/Python3.0/Python/io2.txt', 'r')
for line in f1.readlines():
    print(line.strip()) # strip the trailing '\n'
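# A minimal sketch (not in the original file) of the read(size) approach
# described above: read the file in fixed-size chunks so that a large file
# never has to fit into memory. The 1024-byte chunk size is an arbitrary pick.
with open('/Users/Encore/Desktop/Python3.0/Python/io2.txt', 'r') as big_f:
    while True:
        chunk = big_f.read(1024)
        if not chunk:
            break
        print(chunk)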
'''
Everything above reads text files, and ASCII-encoded text files at that. To
read a binary file, such as an image or a video, just open it in 'rb' mode:
'''
#imageRB = open('/Users/Encore/Desktop/zp.png','rb')
#print imageRB.read()
'''
Character encoding: to read a text file that is not ASCII-encoded, you have to
open it in binary mode and then decode it -- for example a GBK-encoded file.
If converting the encoding by hand every time is too much hassle (being afraid
of hassle is a good trait when programming; otherwise you end up with long,
hard-to-read, unmaintainable code), Python also provides the codecs module,
which converts the encoding automatically while reading and returns unicode
directly:
'''
'''
f = open('/Users/michael/gbk.txt', 'rb')
import codecs
with codecs.open('/Users/michael/gbk.txt', 'r', 'gbk') as f:
f.read() # u'\u6d4b\u8bd5'
'''
'''
Writing files
Writing a file works just like reading one; the only difference is that when
calling open() you pass the mode 'w' or 'wb' to write a text file or a binary
file:
'''
f = open('/Users/Encore/Desktop/Python3.0/Python/io2.txt', 'w')
f.write('written with Python syntax')
f.close()
'''
When we write to a file, the operating system usually does not flush the data
to disk right away; it buffers it in memory and writes it out when idle. Only
when close() is called does the OS guarantee that all pending data reaches the
disk. If you forget to call close(), only part of the data may be written and
the rest is lost. So, once again, the with statement is the safer choice:
To write a text file in a specific encoding, follow the codecs example above:
write unicode and let codecs convert it to the target encoding automatically.
'''
with open('/Users/Encore/Desktop/Python3.0/Python/io2.txt', 'w') as f:
f.write("用with可以保证写入的内容会被保存到磁盘")
'''
Summary
In Python, file reading and writing is done through the file object returned
by open(). Using the with statement for file IO is a good habit.
'''
| mit | 4,743,301,011,331,857,000 | 18.789474 | 81 | 0.70656 | false |