repo_name (string, lengths 5-100) | path (string, lengths 4-299) | copies (string, 990 classes) | size (string, lengths 4-7) | content (string, lengths 666-1.03M) | license (string, 15 classes) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
xisisu/RT-Xen | tools/python/logging/logging-0.4.9.2/test/log_test5.py | 42 | 1795 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests SMTPHandler.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import logging, logging.handlers
MAILHOST = 'beta'
FROM = '[email protected]'
TO = ['arkadi_renko']
SUBJECT = 'Test Logging email from Python logging module (non-buffering)'
def main():
log = logging.getLogger("")
log.setLevel(logging.DEBUG)
hdlr = logging.handlers.SMTPHandler(MAILHOST, FROM, TO, SUBJECT)
hdlr.setFormatter(logging.Formatter("%(asctime)s %(levelname)-5s %(message)s"))
log.addHandler(hdlr)
log.info("Test email contents")
log.removeHandler(hdlr)
if __name__ == "__main__":
main() | gpl-2.0 | -5,454,648,638,592,503,000 | 39.818182 | 83 | 0.750975 | false |
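The script above pushes a single test record through SMTPHandler; with the bundled defaults (`MAILHOST = 'beta'`, a bare recipient name) it only works on the author's own network. A minimal sketch of the constants one would swap in for a reachable relay — all values below are placeholders, not part of the original test:

```python
# Placeholder values for a reachable SMTP relay; the standard-library
# SMTPHandler also accepts a (host, port) tuple for the mail host, and
# this bundled copy is expected to behave the same way.
MAILHOST = 'smtp.example.com'
FROM = '[email protected]'
TO = ['[email protected]']
```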
simongoffin/website_version | addons/purchase_requisition/wizard/purchase_requisition_partner.py | 373 | 2320 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_requisition_partner(osv.osv_memory):
_name = "purchase.requisition.partner"
_description = "Purchase Requisition Partner"
_columns = {
'partner_id': fields.many2one('res.partner', 'Supplier', required=True,domain=[('supplier', '=', True)]),
}
def view_init(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
res = super(purchase_requisition_partner, self).view_init(cr, uid, fields_list, context=context)
record_id = context and context.get('active_id', False) or False
tender = self.pool.get('purchase.requisition').browse(cr, uid, record_id, context=context)
if not tender.line_ids:
raise osv.except_osv(_('Error!'), _('Define product(s) you want to include in the call for bids.'))
return res
def create_order(self, cr, uid, ids, context=None):
active_ids = context and context.get('active_ids', [])
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.requisition').make_purchase_order(cr, uid, active_ids, data.partner_id.id, context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,601,369,103,104,782,300 | 46.346939 | 123 | 0.634483 | false |
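The wizard above reads the requisition it should act on from the context. A rough sketch of driving it from inside another OpenERP model method, assuming `req_id` and `partner_id` are ids of an existing purchase.requisition and res.partner (both names are placeholders):

```python
# Hypothetical driver code; req_id and partner_id are assumed to exist.
ctx = {'active_id': req_id, 'active_ids': [req_id]}
wiz_obj = self.pool.get('purchase.requisition.partner')
wiz_id = wiz_obj.create(cr, uid, {'partner_id': partner_id}, context=ctx)
# Turns the requisition lines into a purchase order for that supplier.
wiz_obj.create_order(cr, uid, [wiz_id], context=ctx)
```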
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/files/apps/headphones/lib/unidecode/x1d4.py | 248 | 3839 | data = (
'A', # 0x00
'B', # 0x01
'C', # 0x02
'D', # 0x03
'E', # 0x04
'F', # 0x05
'G', # 0x06
'H', # 0x07
'I', # 0x08
'J', # 0x09
'K', # 0x0a
'L', # 0x0b
'M', # 0x0c
'N', # 0x0d
'O', # 0x0e
'P', # 0x0f
'Q', # 0x10
'R', # 0x11
'S', # 0x12
'T', # 0x13
'U', # 0x14
'V', # 0x15
'W', # 0x16
'X', # 0x17
'Y', # 0x18
'Z', # 0x19
'a', # 0x1a
'b', # 0x1b
'c', # 0x1c
'd', # 0x1d
'e', # 0x1e
'f', # 0x1f
'g', # 0x20
'h', # 0x21
'i', # 0x22
'j', # 0x23
'k', # 0x24
'l', # 0x25
'm', # 0x26
'n', # 0x27
'o', # 0x28
'p', # 0x29
'q', # 0x2a
'r', # 0x2b
's', # 0x2c
't', # 0x2d
'u', # 0x2e
'v', # 0x2f
'w', # 0x30
'x', # 0x31
'y', # 0x32
'z', # 0x33
'A', # 0x34
'B', # 0x35
'C', # 0x36
'D', # 0x37
'E', # 0x38
'F', # 0x39
'G', # 0x3a
'H', # 0x3b
'I', # 0x3c
'J', # 0x3d
'K', # 0x3e
'L', # 0x3f
'M', # 0x40
'N', # 0x41
'O', # 0x42
'P', # 0x43
'Q', # 0x44
'R', # 0x45
'S', # 0x46
'T', # 0x47
'U', # 0x48
'V', # 0x49
'W', # 0x4a
'X', # 0x4b
'Y', # 0x4c
'Z', # 0x4d
'a', # 0x4e
'b', # 0x4f
'c', # 0x50
'd', # 0x51
'e', # 0x52
'f', # 0x53
'g', # 0x54
'', # 0x55
'i', # 0x56
'j', # 0x57
'k', # 0x58
'l', # 0x59
'm', # 0x5a
'n', # 0x5b
'o', # 0x5c
'p', # 0x5d
'q', # 0x5e
'r', # 0x5f
's', # 0x60
't', # 0x61
'u', # 0x62
'v', # 0x63
'w', # 0x64
'x', # 0x65
'y', # 0x66
'z', # 0x67
'A', # 0x68
'B', # 0x69
'C', # 0x6a
'D', # 0x6b
'E', # 0x6c
'F', # 0x6d
'G', # 0x6e
'H', # 0x6f
'I', # 0x70
'J', # 0x71
'K', # 0x72
'L', # 0x73
'M', # 0x74
'N', # 0x75
'O', # 0x76
'P', # 0x77
'Q', # 0x78
'R', # 0x79
'S', # 0x7a
'T', # 0x7b
'U', # 0x7c
'V', # 0x7d
'W', # 0x7e
'X', # 0x7f
'Y', # 0x80
'Z', # 0x81
'a', # 0x82
'b', # 0x83
'c', # 0x84
'd', # 0x85
'e', # 0x86
'f', # 0x87
'g', # 0x88
'h', # 0x89
'i', # 0x8a
'j', # 0x8b
'k', # 0x8c
'l', # 0x8d
'm', # 0x8e
'n', # 0x8f
'o', # 0x90
'p', # 0x91
'q', # 0x92
'r', # 0x93
's', # 0x94
't', # 0x95
'u', # 0x96
'v', # 0x97
'w', # 0x98
'x', # 0x99
'y', # 0x9a
'z', # 0x9b
'A', # 0x9c
'', # 0x9d
'C', # 0x9e
'D', # 0x9f
'', # 0xa0
'', # 0xa1
'G', # 0xa2
'', # 0xa3
'', # 0xa4
'J', # 0xa5
'K', # 0xa6
'', # 0xa7
'', # 0xa8
'N', # 0xa9
'O', # 0xaa
'P', # 0xab
'Q', # 0xac
'', # 0xad
'S', # 0xae
'T', # 0xaf
'U', # 0xb0
'V', # 0xb1
'W', # 0xb2
'X', # 0xb3
'Y', # 0xb4
'Z', # 0xb5
'a', # 0xb6
'b', # 0xb7
'c', # 0xb8
'd', # 0xb9
'', # 0xba
'f', # 0xbb
'', # 0xbc
'h', # 0xbd
'i', # 0xbe
'j', # 0xbf
'k', # 0xc0
'l', # 0xc1
'm', # 0xc2
'n', # 0xc3
'', # 0xc4
'p', # 0xc5
'q', # 0xc6
'r', # 0xc7
's', # 0xc8
't', # 0xc9
'u', # 0xca
'v', # 0xcb
'w', # 0xcc
'x', # 0xcd
'y', # 0xce
'z', # 0xcf
'A', # 0xd0
'B', # 0xd1
'C', # 0xd2
'D', # 0xd3
'E', # 0xd4
'F', # 0xd5
'G', # 0xd6
'H', # 0xd7
'I', # 0xd8
'J', # 0xd9
'K', # 0xda
'L', # 0xdb
'M', # 0xdc
'N', # 0xdd
'O', # 0xde
'P', # 0xdf
'Q', # 0xe0
'R', # 0xe1
'S', # 0xe2
'T', # 0xe3
'U', # 0xe4
'V', # 0xe5
'W', # 0xe6
'X', # 0xe7
'Y', # 0xe8
'Z', # 0xe9
'a', # 0xea
'b', # 0xeb
'c', # 0xec
'd', # 0xed
'e', # 0xee
'f', # 0xef
'g', # 0xf0
'h', # 0xf1
'i', # 0xf2
'j', # 0xf3
'k', # 0xf4
'l', # 0xf5
'm', # 0xf6
'n', # 0xf7
'o', # 0xf8
'p', # 0xf9
'q', # 0xfa
'r', # 0xfb
's', # 0xfc
't', # 0xfd
'u', # 0xfe
'v', # 0xff
)
| gpl-2.0 | 7,056,980,064,985,956,000 | 13.879845 | 14 | 0.331336 | false |
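The table above covers the Unicode block U+1D400–U+1D4FF (mathematical alphanumeric symbols), mapping each code point to a plain ASCII letter, or to an empty string where no sensible mapping exists. A small sketch of how such a table is exercised through the package's public entry point, assuming unidecode is installed:

```python
# -*- coding: utf-8 -*-
from unidecode import unidecode

# U+1D400 (MATHEMATICAL BOLD CAPITAL A) is entry 0x00 of the table above.
print(unidecode(u'\U0001D400'))              # -> A
# U+1D4D0/U+1D4D1 (bold script A and B) are entries 0xd0 and 0xd1.
print(unidecode(u'\U0001D4D0\U0001D4D1'))    # -> AB
```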
panchenji/Ryu_modified | ryu/controller/tunnels.py | 50 | 6952 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2012 Isaku Yamahata <yamahata at private email ne jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import ryu.exception as ryu_exc
from ryu.base import app_manager
from ryu.controller import event
class RemoteDPIDAlreadyExist(ryu_exc.RyuException):
    message = ('port (%(dpid)s, %(port)s) already has '
               'remote dpid %(remote_dpid)s')
class TunnelKeyAlreadyExist(ryu_exc.RyuException):
message = 'tunnel key %(tunnel_key)s already exists'
class TunnelKeyNotFound(ryu_exc.RyuException):
message = 'no tunnel key for network %(network_id)s'
class EventTunnelKeyBase(event.EventBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyBase, self).__init__()
self.network_id = network_id
self.tunnel_key = tunnel_key
class EventTunnelKeyAdd(EventTunnelKeyBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyAdd, self).__init__(network_id, tunnel_key)
class EventTunnelKeyDel(EventTunnelKeyBase):
def __init__(self, network_id, tunnel_key):
super(EventTunnelKeyDel, self).__init__(network_id, tunnel_key)
class EventTunnelPort(event.EventBase):
def __init__(self, dpid, port_no, remote_dpid, add_del):
super(EventTunnelPort, self).__init__()
self.dpid = dpid
self.port_no = port_no
self.remote_dpid = remote_dpid
self.add_del = add_del
class TunnelKeys(dict):
"""network id(uuid) <-> tunnel key(32bit unsigned int)"""
def __init__(self, f):
super(TunnelKeys, self).__init__()
self.send_event = f
def get_key(self, network_id):
try:
return self[network_id]
except KeyError:
raise TunnelKeyNotFound(network_id=network_id)
def _set_key(self, network_id, tunnel_key):
self[network_id] = tunnel_key
self.send_event(EventTunnelKeyAdd(network_id, tunnel_key))
def register_key(self, network_id, tunnel_key):
if network_id in self:
raise ryu_exc.NetworkAlreadyExist(network_id=network_id)
if tunnel_key in self.values():
raise TunnelKeyAlreadyExist(tunnel_key=tunnel_key)
self._set_key(network_id, tunnel_key)
def update_key(self, network_id, tunnel_key):
if network_id not in self and tunnel_key in self.values():
            raise TunnelKeyAlreadyExist(tunnel_key=tunnel_key)
key = self.get(network_id)
if key is None:
self._set_key(network_id, tunnel_key)
return
if key != tunnel_key:
raise ryu_exc.NetworkAlreadyExist(network_id=network_id)
def delete_key(self, network_id):
try:
tunnel_key = self[network_id]
self.send_event(EventTunnelKeyDel(network_id, tunnel_key))
del self[network_id]
except KeyError:
raise ryu_exc.NetworkNotFound(network_id=network_id)
class DPIDs(object):
"""dpid -> port_no -> remote_dpid"""
def __init__(self, f):
super(DPIDs, self).__init__()
self.dpids = collections.defaultdict(dict)
self.send_event = f
def list_ports(self, dpid):
return self.dpids[dpid]
def _add_remote_dpid(self, dpid, port_no, remote_dpid):
self.dpids[dpid][port_no] = remote_dpid
self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, True))
def add_remote_dpid(self, dpid, port_no, remote_dpid):
if port_no in self.dpids[dpid]:
raise ryu_exc.PortAlreadyExist(dpid=dpid, port=port_no,
network_id=None)
self._add_remote_dpid(dpid, port_no, remote_dpid)
def update_remote_dpid(self, dpid, port_no, remote_dpid):
remote_dpid_ = self.dpids[dpid].get(port_no)
if remote_dpid_ is None:
self._add_remote_dpid(dpid, port_no, remote_dpid)
elif remote_dpid_ != remote_dpid:
            raise RemoteDPIDAlreadyExist(dpid=dpid, port=port_no,
                                         remote_dpid=remote_dpid)
def get_remote_dpid(self, dpid, port_no):
try:
return self.dpids[dpid][port_no]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)
def delete_port(self, dpid, port_no):
try:
remote_dpid = self.dpids[dpid][port_no]
self.send_event(EventTunnelPort(dpid, port_no, remote_dpid, False))
del self.dpids[dpid][port_no]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=port_no)
def get_port(self, dpid, remote_dpid):
try:
dp = self.dpids[dpid]
except KeyError:
raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
res = [port_no for (port_no, remote_dpid_) in dp.items()
if remote_dpid_ == remote_dpid]
assert len(res) <= 1
if len(res) == 0:
raise ryu_exc.PortNotFound(dpid=dpid, port=None, network_id=None)
return res[0]
class Tunnels(app_manager.RyuApp):
def __init__(self):
super(Tunnels, self).__init__()
self.name = 'tunnels'
self.tunnel_keys = TunnelKeys(self.send_event_to_observers)
self.dpids = DPIDs(self.send_event_to_observers)
def get_key(self, network_id):
return self.tunnel_keys.get_key(network_id)
def register_key(self, network_id, tunnel_key):
self.tunnel_keys.register_key(network_id, tunnel_key)
def update_key(self, network_id, tunnel_key):
self.tunnel_keys.update_key(network_id, tunnel_key)
def delete_key(self, network_id):
self.tunnel_keys.delete_key(network_id)
def list_ports(self, dpid):
return self.dpids.list_ports(dpid).keys()
def register_port(self, dpid, port_no, remote_dpid):
self.dpids.add_remote_dpid(dpid, port_no, remote_dpid)
def update_port(self, dpid, port_no, remote_dpid):
self.dpids.update_remote_dpid(dpid, port_no, remote_dpid)
def get_remote_dpid(self, dpid, port_no):
return self.dpids.get_remote_dpid(dpid, port_no)
def delete_port(self, dpid, port_no):
self.dpids.delete_port(dpid, port_no)
#
# methods for gre tunnel
#
def get_port(self, dpid, remote_dpid):
return self.dpids.get_port(dpid, remote_dpid)
| apache-2.0 | -6,309,003,264,394,401,000 | 33.587065 | 79 | 0.634638 | false |
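A rough usage sketch for the registry above; the dpid, port and key values are invented for illustration, and in a real deployment the app instance would be obtained through Ryu's application framework rather than constructed directly:

```python
# Illustrative values only.
tunnels = Tunnels()

# Bind a virtual network to a 32-bit tunnel key, then record which port
# of switch 0x1 tunnels towards switch 0x2.
tunnels.register_key('net-uuid-1', 0x100)
tunnels.register_port(dpid=0x1, port_no=5, remote_dpid=0x2)

assert tunnels.get_key('net-uuid-1') == 0x100
assert tunnels.get_remote_dpid(0x1, 5) == 0x2
assert tunnels.get_port(0x1, 0x2) == 5
```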
jeffninghan/tracker | OCR_test/ocr_test.py | 1 | 1285 | import numpy as np
import cv2
from matplotlib import pyplot as plt
# test algorithm to recognize digits using kNN
# source: http://docs.opencv.org/trunk/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.html
img = cv2.imread('../data/digits.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Now we split the image into 5000 cells, each 20x20 in size
cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
# Make it into a Numpy array. Its size will be (50,100,20,20)
x = np.array(cells)
# Now we prepare train_data and test_data.
train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k,250)[:,np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train the data, then test it with test data for k=1
print "Training using kNN..."
knn = cv2.KNearest()
knn.train(train,train_labels)
ret,result,neighbours,dist = knn.find_nearest(test,k=5)
# Now we check the accuracy of classification
# For that, compare the result with test_labels and check which are wrong
matches = result==test_labels
correct = np.count_nonzero(matches)
accuracy = correct*100.0/result.size
print "Accuracy:", accuracy | mit | -2,021,972,136,132,399,600 | 33.756757 | 101 | 0.729961 | false |
be-cloud-be/horizon-addons | web/web_search_alphabetic/__init__.py | 2 | 1129 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011-2014 Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,513,829,421,170,871,000 | 48.086957 | 85 | 0.620903 | false |
darkleons/BE | addons/crm/res_partner.py | 47 | 4790 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
""" Inherits partner and adds CRM information in the partner form """
_inherit = 'res.partner'
def _opportunity_meeting_phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict(map(lambda x: (x,{'opportunity_count': 0, 'meeting_count': 0}), ids))
# the user may not have access rights for opportunities or meetings
try:
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = {
'opportunity_count': len(partner.opportunity_ids),
'meeting_count': len(partner.meeting_ids),
}
except:
pass
for partner in self.browse(cr, uid, ids, context):
res[partner.id]['phonecall_count'] = len(partner.phonecall_ids)
return res
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'opportunity_ids': fields.one2many('crm.lead', 'partner_id',\
'Leads and Opportunities', domain=[('probability', 'not in', ['0', '100'])]),
'meeting_ids': fields.many2many('calendar.event', 'calendar_event_res_partner_rel','res_partner_id', 'calendar_event_id',
'Meetings'),
'phonecall_ids': fields.one2many('crm.phonecall', 'partner_id',\
'Phonecalls'),
'opportunity_count': fields.function(_opportunity_meeting_phonecall_count, string="Opportunity", type='integer', multi='opp_meet'),
'meeting_count': fields.function(_opportunity_meeting_phonecall_count, string="# Meetings", type='integer', multi='opp_meet'),
'phonecall_count': fields.function(_opportunity_meeting_phonecall_count, string="Phonecalls", type="integer", multi='opp_meet'),
}
def redirect_partner_form(self, cr, uid, partner_id, context=None):
search_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'view_res_partner_filter')
value = {
'domain': "[]",
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'res.partner',
'res_id': int(partner_id),
'view_id': False,
'context': context,
'type': 'ir.actions.act_window',
'search_view_id': search_view and search_view[1] or False
}
return value
def make_opportunity(self, cr, uid, ids, opportunity_summary, planned_revenue=0.0, probability=0.0, partner_id=None, context=None):
categ_obj = self.pool.get('crm.case.categ')
categ_ids = categ_obj.search(cr, uid, [('object_id.model','=','crm.lead')])
lead_obj = self.pool.get('crm.lead')
opportunity_ids = {}
for partner in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = partner.id
opportunity_id = lead_obj.create(cr, uid, {
'name' : opportunity_summary,
'planned_revenue' : planned_revenue,
'probability' : probability,
'partner_id' : partner_id,
'categ_ids' : categ_ids and categ_ids[0:1] or [],
'type': 'opportunity'
}, context=context)
opportunity_ids[partner_id] = opportunity_id
return opportunity_ids
def schedule_meeting(self, cr, uid, ids, context=None):
partner_ids = list(ids)
partner_ids.append(self.pool.get('res.users').browse(cr, uid, uid).partner_id.id)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
res['context'] = {
'search_default_partner_ids': list(ids),
'default_partner_ids': partner_ids,
}
return res
| agpl-3.0 | -9,089,930,205,144,933,000 | 47.877551 | 139 | 0.587265 | false |
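A short sketch of calling the `make_opportunity` helper above from inside another OpenERP model method; the ids and values are placeholders:

```python
# Placeholder ids/values; assumed to run inside another osv model method.
partner_obj = self.pool.get('res.partner')
# Returns a {partner_id: opportunity_id} mapping.
created = partner_obj.make_opportunity(
    cr, uid, [partner_id],
    'Follow-up opportunity',
    planned_revenue=5000.0,
    probability=25.0,
)
```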
viewdy/phantomjs2 | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/watchlist.py | 134 | 2280 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks WebKit style for the watchlist file."""
from webkitpy.common.watchlist.watchlistparser import WatchListParser
class WatchListChecker(object):
"""Processes the watch list for checking style."""
def __init__(self, file_path, handle_style_error):
self._handle_style_error = handle_style_error
self._handle_style_error.turn_off_line_filtering()
def check(self, lines):
def log_to_style_error(message):
# Always report line 0 since we don't have anything better.
self._handle_style_error(0,
'watchlist/general', 5,
message)
WatchListParser(log_error=log_to_style_error).parse('\n'.join(lines))
| bsd-3-clause | 6,808,277,061,204,682,000 | 43.705882 | 77 | 0.726754 | false |
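A sketch of driving the checker outside the full style framework, assuming it runs inside a WebKit checkout where `webkitpy` is importable; the stub below stands in for the error handler the framework normally supplies:

```python
class StubErrorHandler(object):
    """Minimal stand-in for the handler webkitpy's style checker supplies."""
    def __call__(self, line_number, category, confidence, message):
        print('line %s, %s (%s): %s' % (line_number, category,
                                        confidence, message))
    def turn_off_line_filtering(self):
        pass

checker = WatchListChecker('WatchList', StubErrorHandler())
checker.check(open('WatchList').read().split('\n'))
```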
polojacky/ehfpi | ehf/filebrowser/management/commands/fb_version_remove.py | 13 | 5220 | # coding: utf-8
# PYTHON IMPORTS
import os
import re
# DJANGO IMPORTS
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils.six.moves import input
# FILEBROWSER IMPORTS
from filebrowser.settings import EXTENSION_LIST, EXCLUDE, DIRECTORY, VERSIONS, EXTENSIONS
class Command(BaseCommand):
args = '<media_path>'
help = "Remove Image-Versions within FILEBROWSER_DIRECTORY/MEDIA_ROOT."
def handle(self, *args, **options):
media_path = ""
if len(args):
media_path = args[0]
path = os.path.join(settings.MEDIA_ROOT, media_path)
if not os.path.isdir(path):
raise CommandError('<media_path> must be a directory in MEDIA_ROOT. "%s" is no directory.' % path)
self.stdout.write("\n%s\n" % self.help)
self.stdout.write("in this case: %s\n" % path)
# get suffix or prefix
default_prefix_or_suffix = "s"
while 1:
self.stdout.write('\nOlder versions of the FileBrowser used to prefix the filename with the version name.\n')
self.stdout.write('Current version of the FileBrowser adds the version name as suffix.\n')
prefix_or_suffix = input('"p" for prefix or "s" for suffix (leave blank for "%s"): ' % default_prefix_or_suffix)
if default_prefix_or_suffix and prefix_or_suffix == '':
prefix_or_suffix = default_prefix_or_suffix
if prefix_or_suffix != "s" and prefix_or_suffix != "p":
                self.stderr.write('Error: "p" and "s" are the only valid inputs.\n')
prefix_or_suffix = None
continue
break
# get version name
while 1:
version_name = input('\nversion name as defined with VERSIONS: ')
if version_name == "":
self.stderr.write('Error: You have to enter a version name.\n')
version_name = None
continue
else:
break
# get list of all matching files
files = self.get_files(path, version_name, (prefix_or_suffix == "p"))
# output (short version) of files to be deleted
if len(files) > 15:
self.stdout.write('\nFirst/Last 5 files to remove:\n')
for current_file in files[:5]:
self.stdout.write('%s\n' % current_file)
self.stdout.write('...\n')
self.stdout.write('...\n')
for current_file in files[len(files)-5:]:
self.stdout.write('%s\n' % current_file)
else:
self.stdout.write('\nFiles to remove:\n')
for current_file in files:
self.stdout.write('%s\n' % current_file)
# no files...done
if len(files) == 0:
self.stdout.write('0 files removed.\n\n')
return
else:
self.stdout.write('%d file(s) will be removed.\n\n' % len(files))
# ask to make sure
do_remove = ""
self.stdout.write('Are Sure you want to delete these files?\n')
do_remove = input('"y" for Yes or "n" for No (leave blank for "n"): ')
# if "yes" we delete. any different case we finish without removing anything
if do_remove == "y":
for current_file in files:
os.remove(current_file)
self.stdout.write('%d file(s) removed.\n\n' % len(files))
else:
self.stdout.write('No files removed.\n\n')
return
    # get files matching:
    # path: search recursively in this path (os.walk)
# version_name: string is pre/suffix of filename
# search_for_prefix: if true we match against the start of the filename (default is the end)
def get_files(self, path, version_name, search_for_prefix):
file_list = []
# Precompile regular expressions
filter_re = []
for exp in EXCLUDE:
filter_re.append(re.compile(exp))
# walkt throu the filebrowser directory
# for all/new files (except file versions itself and excludes)
for dirpath, dirnames, filenames in os.walk(path, followlinks=True):
for filename in filenames:
filtered = False
# no "hidden" files (stating with ".")
if filename.startswith('.'):
continue
# check the exclude list
for re_prefix in filter_re:
if re_prefix.search(filename):
filtered = True
if filtered:
continue
(filename_noext, extension) = os.path.splitext(filename)
# images only
if extension in EXTENSIONS["Image"]:
# if image matches with version_name we add it to the file_list
if search_for_prefix:
if filename_noext.startswith(version_name + "_"):
file_list.append(os.path.join(dirpath, filename))
elif filename_noext.endswith("_" + version_name):
file_list.append(os.path.join(dirpath, filename))
return file_list
| apache-2.0 | -1,748,148,750,258,710,800 | 37.955224 | 124 | 0.563218 | false |
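Invoked as an ordinary Django management command, e.g. `python manage.py fb_version_remove uploads/` (where `uploads/` stands for any directory below MEDIA_ROOT), the command prompts for the prefix/suffix convention and the version name, lists the matching files, and only deletes them after an explicit "y" confirmation.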
wildtetris/python-social-auth | social/tests/backends/test_stocktwits.py | 91 | 1658 | import json
from social.tests.backends.oauth import OAuth2Test
class StocktwitsOAuth2Test(OAuth2Test):
backend_path = 'social.backends.stocktwits.StocktwitsOAuth2'
user_data_url = 'https://api.stocktwits.com/api/2/account/verify.json'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'response': {
'status': 200
},
'user': {
'username': 'foobar',
'name': 'Foo Bar',
'classification': [],
'avatar_url': 'http://avatars.stocktwits.net/images/'
'default_avatar_thumb.jpg',
'avatar_url_ssl': 'https://s3.amazonaws.com/st-avatars/images/'
'default_avatar_thumb.jpg',
'id': 101010,
'identity': 'User'
}
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class StocktwitsOAuth2UsernameAlternativeTest(StocktwitsOAuth2Test):
user_data_body = json.dumps({
'response': {
'status': 200
},
'user': {
'username': 'foobar',
'name': 'Foobar',
'classification': [],
'avatar_url': 'http://avatars.stocktwits.net/images/'
'default_avatar_thumb.jpg',
'avatar_url_ssl': 'https://s3.amazonaws.com/st-avatars/images/'
'default_avatar_thumb.jpg',
'id': 101010,
'identity': 'User'
}
})
| bsd-3-clause | 1,666,466,641,908,245,500 | 29.703704 | 75 | 0.520507 | false |
young-geng/leet_code | problems/200_number-of-islands/main.py | 1 | 2271 | # https://leetcode.com/problems/number-of-islands/
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0: return 0
cols,rows = len(grid), len(grid[0])
def dfs(i, j):
"""
helper function to start filling every cell reachable from (i,j)
with '0's. A variation of Flood Fill algorithm for connected
components.
"""
if i >= 0 and j >= 0 and i < cols and j < rows:
# if not visited, mark it '0' and run dfs
if grid[i][j] == '1':
grid[i][j] = '0'
# vertical move
dfs(i-1, j)
dfs(i+1, j)
# horizontal move
dfs(i, j-1)
dfs(i, j+1)
cc_count = 0
for i in xrange(cols):
for j in xrange(rows):
if grid[i][j] == '1':
dfs(i, j)
cc_count += 1
return cc_count
# BFS Solution for practice
# Runtime: O(cN) => O(N)
import Queue
class BFSSolution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0: return 0
cols, rows = len(grid), len(grid[0])
cc_count = 0
for i in xrange(cols):
for j in xrange(rows):
if grid[i][j] == '1':
self.bfs(grid, i, j)
cc_count += 1
return cc_count
def bfs(self, grid, i, j):
cols, rows = len(grid), len(grid[0])
q = Queue.Queue()
q.put([i,j])
while not q.empty():
x, y = q.get()
# if the point is not visited, a.k.a '1'
if grid[x][y] == '1':
grid[x][y] = '0'
self.pushToQueue(q, cols, rows, x-1, y)
self.pushToQueue(q, cols, rows, x+1, y)
self.pushToQueue(q, cols, rows, x, y-1)
self.pushToQueue(q, cols, rows, x, y+1)
def pushToQueue(self, q, cols, rows, i, j):
if i >= 0 and i < cols and j >= 0 and j < rows:
q.put([i,j])
| mit | 1,489,147,805,406,517,500 | 30.985915 | 76 | 0.432409 | false |
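A quick sanity check of both solutions on a small grid; note that `numIslands` flood-fills the grid in place, so each solver gets its own copy:

```python
import copy

grid = [['1', '1', '0', '0'],
        ['0', '1', '0', '1'],
        ['0', '0', '0', '1']]

# Two islands: the block in the top-left corner and the column on the right.
print(Solution().numIslands(copy.deepcopy(grid)))     # -> 2
print(BFSSolution().numIslands(copy.deepcopy(grid)))  # -> 2
```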
onponomarev/ganeti | lib/build/sphinx_ext.py | 3 | 18231 | #
#
# Copyright (C) 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Sphinx extension for building opcode documentation.
"""
# pylint: disable=C0413
# C0413: Wrong import position
import re
from cStringIO import StringIO
import docutils.statemachine
import docutils.nodes
import docutils.utils
import docutils.parsers.rst
import sphinx.errors
import sphinx.util.compat
import sphinx.roles
import sphinx.addnodes
s_compat = sphinx.util.compat
try:
# Access to a protected member of a client class
# pylint: disable=W0212
orig_manpage_role = docutils.parsers.rst.roles._roles["manpage"]
except (AttributeError, ValueError, KeyError), err:
# Normally the "manpage" role is registered by sphinx/roles.py
raise Exception("Can't find reST role named 'manpage': %s" % err)
from ganeti import _constants
from ganeti import constants
from ganeti import compat
from ganeti import errors
from ganeti import utils
from ganeti import opcodes
from ganeti import opcodes_base
from ganeti import ht
from ganeti import rapi
from ganeti import luxi
from ganeti import objects
from ganeti import http
from ganeti import pathutils
import ganeti.rapi.rlib2 # pylint: disable=W0611
import ganeti.rapi.connector # pylint: disable=W0611
#: Regular expression for man page names
_MAN_RE = re.compile(r"^(?P<name>[-\w_]+)\((?P<section>\d+)\)$")
_TAB_WIDTH = 2
RAPI_URI_ENCODE_RE = re.compile("[^_a-z0-9]+", re.I)
class ReSTError(Exception):
"""Custom class for generating errors in Sphinx.
"""
def _GetCommonParamNames():
"""Builds a list of parameters common to all opcodes.
"""
names = set(map(compat.fst, opcodes.OpCode.OP_PARAMS))
# The "depends" attribute should be listed
names.remove(opcodes_base.DEPEND_ATTR)
return names
COMMON_PARAM_NAMES = _GetCommonParamNames()
#: Namespace for evaluating expressions
EVAL_NS = dict(compat=compat, constants=constants, utils=utils, errors=errors,
rlib2=rapi.rlib2, luxi=luxi, rapi=rapi, objects=objects,
http=http, pathutils=pathutils)
# Constants documentation for man pages
CV_ECODES_DOC = "ecodes"
# We don't care about the leak of variables _, name and doc here.
# pylint: disable=W0621
CV_ECODES_DOC_LIST = [(name, doc) for (_, name, doc) in constants.CV_ALL_ECODES]
DOCUMENTED_CONSTANTS = {
CV_ECODES_DOC: CV_ECODES_DOC_LIST,
}
class OpcodeError(sphinx.errors.SphinxError):
category = "Opcode error"
def _SplitOption(text):
"""Split simple option list.
@type text: string
@param text: Options, e.g. "foo, bar, baz"
"""
return [i.strip(",").strip() for i in text.split()]
def _ParseAlias(text):
"""Parse simple assignment option.
@type text: string
@param text: Assignments, e.g. "foo=bar, hello=world"
@rtype: dict
"""
result = {}
for part in _SplitOption(text):
if "=" not in part:
raise OpcodeError("Invalid option format, missing equal sign")
(name, value) = part.split("=", 1)
result[name.strip()] = value.strip()
return result
def _BuildOpcodeParams(op_id, include, exclude, alias):
"""Build opcode parameter documentation.
@type op_id: string
@param op_id: Opcode ID
"""
op_cls = opcodes.OP_MAPPING[op_id]
params_with_alias = \
utils.NiceSort([(alias.get(name, name), name, default, test, doc)
for (name, default, test, doc) in op_cls.GetAllParams()],
key=compat.fst)
for (rapi_name, name, default, test, doc) in params_with_alias:
# Hide common parameters if not explicitly included
if (name in COMMON_PARAM_NAMES and
(not include or name not in include)):
continue
if exclude is not None and name in exclude:
continue
if include is not None and name not in include:
continue
    has_default = default is not None and default is not ht.NoDefault
has_test = test is not None
buf = StringIO()
buf.write("``%s``" % (rapi_name,))
if has_default or has_test:
buf.write(" (")
if has_default:
if default == "":
buf.write("defaults to the empty string")
else:
buf.write("defaults to ``%s``" % (default,))
if has_test:
buf.write(", ")
if has_test:
buf.write("must be ``%s``" % (test,))
buf.write(")")
yield buf.getvalue()
# Add text
for line in doc.splitlines():
yield " %s" % line
def _BuildOpcodeResult(op_id):
"""Build opcode result documentation.
@type op_id: string
@param op_id: Opcode ID
"""
op_cls = opcodes.OP_MAPPING[op_id]
result_fn = getattr(op_cls, "OP_RESULT", None)
if not result_fn:
raise OpcodeError("Opcode '%s' has no result description" % op_id)
return "``%s``" % result_fn
class OpcodeParams(s_compat.Directive):
"""Custom directive for opcode parameters.
See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = dict(include=_SplitOption, exclude=_SplitOption,
alias=_ParseAlias)
def run(self):
op_id = self.arguments[0]
include = self.options.get("include", None)
exclude = self.options.get("exclude", None)
alias = self.options.get("alias", {})
path = op_id
include_text = "\n\n".join(_BuildOpcodeParams(op_id,
include,
exclude,
alias))
# Inject into state machine
include_lines = docutils.statemachine.string2lines(include_text, _TAB_WIDTH,
convert_whitespace=1)
self.state_machine.insert_input(include_lines, path)
return []
class OpcodeResult(s_compat.Directive):
"""Custom directive for opcode result.
See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def run(self):
op_id = self.arguments[0]
path = op_id
include_text = _BuildOpcodeResult(op_id)
# Inject into state machine
include_lines = docutils.statemachine.string2lines(include_text, _TAB_WIDTH,
convert_whitespace=1)
self.state_machine.insert_input(include_lines, path)
return []
def PythonEvalRole(role, rawtext, text, lineno, inliner,
options={}, content=[]):
"""Custom role to evaluate Python expressions.
The expression's result is included as a literal.
"""
# pylint: disable=W0102,W0613
# W0102: Dangerous default value as argument
# W0613: Unused argument
code = docutils.utils.unescape(text, restore_backslashes=True)
try:
result = eval(code, EVAL_NS) # pylint: disable=W0123
except Exception, err: # pylint: disable=W0703
msg = inliner.reporter.error("Failed to evaluate %r: %s" % (code, err),
line=lineno)
return ([inliner.problematic(rawtext, rawtext, msg)], [msg])
node = docutils.nodes.literal("", unicode(result), **options)
return ([node], [])
class PythonAssert(s_compat.Directive):
"""Custom directive for writing assertions.
The content must be a valid Python expression. If its result does not
evaluate to C{True}, the assertion fails.
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
def run(self):
# Handle combinations of Sphinx and docutils not providing the wanted method
if hasattr(self, "assert_has_content"):
self.assert_has_content()
else:
assert self.content
code = "\n".join(self.content)
try:
result = eval(code, EVAL_NS) # pylint: disable=W0123
except Exception, err:
raise self.error("Failed to evaluate %r: %s" % (code, err))
if not result:
raise self.error("Assertion failed: %s" % (code, ))
return []
def BuildQueryFields(fields):
"""Build query fields documentation.
@type fields: dict (field name as key, field details as value)
"""
defs = [(fdef.name, fdef.doc)
for (_, (fdef, _, _, _)) in utils.NiceSort(fields.items(),
key=compat.fst)]
return BuildValuesDoc(defs)
def BuildValuesDoc(values):
"""Builds documentation for a list of values
@type values: list of tuples in the form (value, documentation)
"""
for name, doc in values:
assert len(doc.splitlines()) == 1
yield "``%s``" % (name,)
yield " %s" % (doc,)
def _ManPageNodeClass(*args, **kwargs):
"""Generates a pending XRef like a ":doc:`...`" reference.
"""
# Type for sphinx/environment.py:BuildEnvironment.resolve_references
kwargs["reftype"] = "doc"
# Force custom title
kwargs["refexplicit"] = True
return sphinx.addnodes.pending_xref(*args, **kwargs)
class _ManPageXRefRole(sphinx.roles.XRefRole):
def __init__(self):
"""Initializes this class.
"""
sphinx.roles.XRefRole.__init__(self, nodeclass=_ManPageNodeClass,
warn_dangling=True)
assert not hasattr(self, "converted"), \
"Sphinx base class gained an attribute named 'converted'"
self.converted = None
def process_link(self, env, refnode, has_explicit_title, title, target):
"""Specialization for man page links.
"""
if has_explicit_title:
raise ReSTError("Setting explicit title is not allowed for man pages")
# Check format and extract name and section
m = _MAN_RE.match(title)
if not m:
raise ReSTError("Man page reference '%s' does not match regular"
" expression '%s'" % (title, _MAN_RE.pattern))
name = m.group("name")
section = int(m.group("section"))
wanted_section = _constants.MAN_PAGES.get(name, None)
if not (wanted_section is None or wanted_section == section):
raise ReSTError("Referenced man page '%s' has section number %s, but the"
" reference uses section %s" %
(name, wanted_section, section))
self.converted = bool(wanted_section is not None and
env.app.config.enable_manpages)
if self.converted:
# Create link to known man page
return (title, "man-%s" % name)
else:
# No changes
return (title, target)
def _ManPageRole(typ, rawtext, text, lineno, inliner, # pylint: disable=W0102
options={}, content=[]):
"""Custom role for man page references.
Converts man pages to links if enabled during the build.
"""
xref = _ManPageXRefRole()
assert ht.TNone(xref.converted)
# Check if it's a known man page
try:
result = xref(typ, rawtext, text, lineno, inliner,
options=options, content=content)
except ReSTError, err:
msg = inliner.reporter.error(str(err), line=lineno)
return ([inliner.problematic(rawtext, rawtext, msg)], [msg])
assert ht.TBool(xref.converted)
# Return if the conversion was successful (i.e. the man page was known and
# conversion was enabled)
if xref.converted:
return result
# Fallback if man page links are disabled or an unknown page is referenced
return orig_manpage_role(typ, rawtext, text, lineno, inliner,
options=options, content=content)
def _EncodeRapiResourceLink(method, uri):
"""Encodes a RAPI resource URI for use as a link target.
"""
parts = [RAPI_URI_ENCODE_RE.sub("-", uri.lower()).strip("-")]
if method is not None:
parts.append(method.lower())
return "rapi-res-%s" % "+".join(filter(None, parts))
def _MakeRapiResourceLink(method, uri):
"""Generates link target name for RAPI resource.
"""
if uri in ["/", "/2"]:
# Don't link these
return None
elif uri == "/version":
return _EncodeRapiResourceLink(method, uri)
elif uri.startswith("/2/"):
return _EncodeRapiResourceLink(method, uri[len("/2/"):])
else:
raise ReSTError("Unhandled URI '%s'" % uri)
def _GetHandlerMethods(handler):
"""Returns list of HTTP methods supported by handler class.
@type handler: L{rapi.baserlib.ResourceBase}
@param handler: Handler class
@rtype: list of strings
"""
return sorted(m_attrs.method for m_attrs in rapi.baserlib.OPCODE_ATTRS
# Only if handler supports method
if hasattr(handler, m_attrs.method) or
hasattr(handler, m_attrs.opcode))
def _DescribeHandlerAccess(handler, method):
"""Returns textual description of required RAPI permissions.
@type handler: L{rapi.baserlib.ResourceBase}
@param handler: Handler class
@type method: string
@param method: HTTP method (e.g. L{http.HTTP_GET})
@rtype: string
"""
access = rapi.baserlib.GetHandlerAccess(handler, method)
if access:
return utils.CommaJoin(sorted(access))
else:
return "*(none)*"
class _RapiHandlersForDocsHelper(object):
@classmethod
def Build(cls):
"""Returns dictionary of resource handlers.
"""
resources = \
rapi.connector.GetHandlers("[node_name]", "[instance_name]",
"[group_name]", "[network_name]", "[job_id]",
"[disk_index]", "[filter_uuid]",
"[resource]",
translate=cls._TranslateResourceUri)
return resources
@classmethod
def _TranslateResourceUri(cls, *args):
"""Translates a resource URI for use in documentation.
@see: L{rapi.connector.GetHandlers}
"""
return "".join(map(cls._UriPatternToString, args))
@staticmethod
def _UriPatternToString(value):
"""Converts L{rapi.connector.UriPattern} to strings.
"""
if isinstance(value, rapi.connector.UriPattern):
return value.content
else:
return value
_RAPI_RESOURCES_FOR_DOCS = _RapiHandlersForDocsHelper.Build()
def _BuildRapiAccessTable(res):
"""Build a table with access permissions needed for all RAPI resources.
"""
for (uri, handler) in utils.NiceSort(res.items(), key=compat.fst):
reslink = _MakeRapiResourceLink(None, uri)
if not reslink:
# No link was generated
continue
yield ":ref:`%s <%s>`" % (uri, reslink)
for method in _GetHandlerMethods(handler):
yield (" | :ref:`%s <%s>`: %s" %
(method, _MakeRapiResourceLink(method, uri),
_DescribeHandlerAccess(handler, method)))
class RapiAccessTable(s_compat.Directive):
"""Custom directive to generate table of all RAPI resources.
See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
"""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
include_text = "\n".join(_BuildRapiAccessTable(_RAPI_RESOURCES_FOR_DOCS))
# Inject into state machine
include_lines = docutils.statemachine.string2lines(include_text, _TAB_WIDTH,
convert_whitespace=1)
self.state_machine.insert_input(include_lines, self.__class__.__name__)
return []
class RapiResourceDetails(s_compat.Directive):
"""Custom directive for RAPI resource details.
See also <http://docutils.sourceforge.net/docs/howto/rst-directives.html>.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
def run(self):
uri = self.arguments[0]
try:
handler = _RAPI_RESOURCES_FOR_DOCS[uri]
except KeyError:
raise self.error("Unknown resource URI '%s'" % uri)
lines = [
".. list-table::",
" :widths: 1 4",
" :header-rows: 1",
"",
" * - Method",
" - :ref:`Required permissions <rapi-users>`",
]
for method in _GetHandlerMethods(handler):
lines.extend([
" * - :ref:`%s <%s>`" % (method, _MakeRapiResourceLink(method, uri)),
" - %s" % _DescribeHandlerAccess(handler, method),
])
# Inject into state machine
include_lines = \
docutils.statemachine.string2lines("\n".join(lines), _TAB_WIDTH,
convert_whitespace=1)
self.state_machine.insert_input(include_lines, self.__class__.__name__)
return []
def setup(app):
"""Sphinx extension callback.
"""
# TODO: Implement Sphinx directive for query fields
app.add_directive("opcode_params", OpcodeParams)
app.add_directive("opcode_result", OpcodeResult)
app.add_directive("pyassert", PythonAssert)
app.add_role("pyeval", PythonEvalRole)
app.add_directive("rapi_access_table", RapiAccessTable)
app.add_directive("rapi_resource_details", RapiResourceDetails)
app.add_config_value("enable_manpages", False, True)
app.add_role("manpage", _ManPageRole)
| bsd-2-clause | 7,446,492,175,958,281,000 | 27.090909 | 80 | 0.651582 | false |
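A sketch of how the extension is wired into a Sphinx build and then used from reStructuredText, assuming the ganeti package (the lib/ tree) is importable when Sphinx runs; the opcode id is only an example:

```python
# docs/conf.py (sketch)
extensions = ["ganeti.build.sphinx_ext"]
enable_manpages = False          # config value registered by setup() above

# In a .rst source the registered directives/roles can then be used as:
#
#   .. opcode_params:: OP_INSTANCE_REBOOT
#
#   .. opcode_result:: OP_INSTANCE_REBOOT
#
#   The config version is :pyeval:`constants.CONFIG_VERSION`.
```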
anryko/ansible | lib/ansible/modules/network/netvisor/pn_port_config.py | 38 | 12285 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_port_config
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: 2.8
short_description: CLI command to modify port-config
description:
- This module can be used to modify a port configuration.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(update) to modify the port-config.
required: True
type: str
choices: ['update']
pn_intf:
description:
- physical interface.
required: False
type: str
pn_crc_check_enable:
description:
- CRC check on ingress and rewrite on egress.
required: False
type: bool
pn_dscp_map:
description:
- DSCP map name to enable on port.
required: False
type: str
pn_autoneg:
description:
- physical port autonegotiation.
required: False
type: bool
pn_speed:
description:
- physical port speed.
required: False
choices: ['disable', '10m', '100m', '1g',
'2.5g', '10g', '25g', '40g', '50g', '100g']
pn_port:
description:
- physical port.
required: False
type: str
pn_vxlan_termination:
description:
- physical port vxlan termination setting.
required: False
type: bool
pn_pause:
description:
- physical port pause.
required: False
type: bool
pn_loopback:
description:
- physical port loopback.
required: False
type: bool
pn_loop_vlans:
description:
- looping vlans.
required: False
type: str
pn_routing:
description:
- routing.
required: False
type: bool
pn_edge_switch:
description:
- physical port edge switch.
required: False
type: bool
pn_enable:
description:
- physical port enable.
required: False
type: bool
pn_description:
description:
- physical port description.
required: False
type: str
pn_host_enable:
description:
- Host facing port control setting.
required: False
type: bool
pn_allowed_tpid:
description:
- Allowed TPID in addition to 0x8100 on Vlan header.
required: False
type: str
choices: ['vlan', 'q-in-q', 'q-in-q-old']
pn_mirror_only:
description:
- physical port mirror only.
required: False
type: bool
pn_reflect:
description:
- physical port reflection.
required: False
type: bool
pn_jumbo:
description:
- jumbo frames on physical port.
required: False
type: bool
pn_egress_rate_limit:
description:
- max egress port data rate limit.
required: False
type: str
pn_eth_mode:
description:
- physical Ethernet mode.
required: False
choices: ['1000base-x', 'sgmii', 'disabled', 'GMII']
pn_fabric_guard:
description:
- Fabric guard configuration.
required: False
type: bool
pn_local_switching:
description:
- no-local-switching port cannot bridge traffic to
another no-local-switching port.
required: False
type: bool
pn_lacp_priority:
description:
- LACP priority from 1 to 65535.
required: False
type: str
pn_send_port:
description:
- send port.
required: False
type: str
pn_port_mac_address:
description:
- physical port MAC Address.
required: False
type: str
pn_defer_bringup:
description:
- defer port bringup.
required: False
type: bool
"""
EXAMPLES = """
- name: port config modify
pn_port_config:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_dscp_map: "foo"
- name: port config modify
pn_port_config:
pn_cliswitch: "sw01"
state: "update"
pn_port: "all"
pn_host_enable: true
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the port-config command.
returned: always
type: list
stderr:
description: set of error responses from the port-config command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
"""
This method checks for idempotency using the dscp-map-show name command.
If a user with given name exists, return True else False.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
"""
name = module.params['pn_dscp_map']
cli += ' dscp-map-show name %s format name no-show-headers' % name
out = run_commands(module, cli)[1]
out = out.split()
return True if name in out[-1] else False
def main():
""" This section is for arguments parsing """
state_map = dict(
update='port-config-modify'
)
module = AnsibleModule(
argument_spec=dict(
pn_cliswitch=dict(required=False, type='str'),
state=dict(required=True, type='str',
choices=['update']),
pn_intf=dict(required=False, type='str'),
pn_crc_check_enable=dict(required=False, type='bool'),
pn_dscp_map=dict(required=False, type='str'),
pn_autoneg=dict(required=False, type='bool'),
pn_speed=dict(required=False, type='str',
choices=['disable', '10m', '100m',
'1g', '2.5g', '10g', '25g',
'40g', '50g', '100g']),
pn_port=dict(required=False, type='str'),
pn_vxlan_termination=dict(required=False, type='bool'),
pn_pause=dict(required=False, type='bool'),
pn_loopback=dict(required=False, type='bool'),
pn_loop_vlans=dict(required=False, type='str'),
pn_routing=dict(required=False, type='bool'),
pn_edge_switch=dict(required=False, type='bool'),
pn_enable=dict(required=False, type='bool'),
pn_description=dict(required=False, type='str'),
pn_host_enable=dict(required=False, type='bool'),
pn_allowed_tpid=dict(required=False, type='str',
choices=['vlan', 'q-in-q', 'q-in-q-old']),
pn_mirror_only=dict(required=False, type='bool'),
pn_reflect=dict(required=False, type='bool'),
pn_jumbo=dict(required=False, type='bool'),
pn_egress_rate_limit=dict(required=False, type='str'),
pn_eth_mode=dict(required=False, type='str',
choices=['1000base-x', 'sgmii',
'disabled', 'GMII']),
pn_fabric_guard=dict(required=False, type='bool'),
pn_local_switching=dict(required=False, type='bool'),
pn_lacp_priority=dict(required=False, type='str'),
pn_send_port=dict(required=False, type='str'),
pn_port_mac_address=dict(required=False, type='str'),
pn_defer_bringup=dict(required=False, type='bool'),
),
required_if=(
['state', 'update', ['pn_port']],
),
required_one_of=[['pn_intf', 'pn_crc_check_enable', 'pn_dscp_map',
'pn_speed', 'pn_autoneg',
'pn_vxlan_termination', 'pn_pause',
'pn_fec', 'pn_loopback', 'pn_loop_vlans',
'pn_routing', 'pn_edge_switch',
'pn_enable', 'pn_description',
'pn_host_enable', 'pn_allowed_tpid',
'pn_mirror_only', 'pn_reflect',
'pn_jumbo', 'pn_egress_rate_limit',
'pn_eth_mode', 'pn_fabric_guard',
'pn_local_switching', 'pn_lacp_priority',
'pn_send_port', 'pn_port_mac_address',
'pn_defer_bringup']],
)
# Accessing the arguments
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
intf = module.params['pn_intf']
crc_check_enable = module.params['pn_crc_check_enable']
dscp_map = module.params['pn_dscp_map']
autoneg = module.params['pn_autoneg']
speed = module.params['pn_speed']
port = module.params['pn_port']
vxlan_termination = module.params['pn_vxlan_termination']
pause = module.params['pn_pause']
loopback = module.params['pn_loopback']
loop_vlans = module.params['pn_loop_vlans']
routing = module.params['pn_routing']
edge_switch = module.params['pn_edge_switch']
enable = module.params['pn_enable']
description = module.params['pn_description']
host_enable = module.params['pn_host_enable']
allowed_tpid = module.params['pn_allowed_tpid']
mirror_only = module.params['pn_mirror_only']
reflect = module.params['pn_reflect']
jumbo = module.params['pn_jumbo']
egress_rate_limit = module.params['pn_egress_rate_limit']
eth_mode = module.params['pn_eth_mode']
fabric_guard = module.params['pn_fabric_guard']
local_switching = module.params['pn_local_switching']
lacp_priority = module.params['pn_lacp_priority']
send_port = module.params['pn_send_port']
port_mac_address = module.params['pn_port_mac_address']
defer_bringup = module.params['pn_defer_bringup']
command = state_map[state]
# Building the CLI command string
cli = pn_cli(module, cliswitch)
if dscp_map:
NAME_EXISTS = check_cli(module, cli)
if command == 'port-config-modify':
cli += ' %s ' % command
if dscp_map:
if NAME_EXISTS is False:
module.fail_json(
failed=True,
msg='Create dscp map with name %s before updating' % dscp_map
)
cli += ' dscp-map ' + dscp_map
if intf:
cli += ' intf ' + intf
if speed:
cli += ' speed ' + speed
if port:
cli += ' port ' + port
if allowed_tpid:
cli += ' allowed-tpid ' + allowed_tpid
if egress_rate_limit:
cli += ' egress-rate-limit ' + egress_rate_limit
if eth_mode:
cli += ' eth-mode ' + eth_mode
if lacp_priority:
cli += ' lacp-priority ' + lacp_priority
if send_port:
cli += ' send-port ' + send_port
if port_mac_address:
cli += ' port-mac-address ' + port_mac_address
cli += booleanArgs(crc_check_enable, 'crc-check-enable', 'crc-check-disable')
cli += booleanArgs(autoneg, 'autoneg', 'no-autoneg')
cli += booleanArgs(vxlan_termination, 'vxlan-termination', 'no-vxlan-termination')
cli += booleanArgs(pause, 'pause', 'no-pause')
cli += booleanArgs(loopback, 'loopback', 'no-loopback')
cli += booleanArgs(routing, 'routing', 'no-routing')
cli += booleanArgs(edge_switch, 'edge-switch', 'no-edge-switch')
cli += booleanArgs(enable, 'enable', 'disable')
cli += booleanArgs(host_enable, 'host-enable', 'host-disable')
cli += booleanArgs(mirror_only, 'mirror-only', 'no-mirror-receive-only')
cli += booleanArgs(reflect, 'reflect', 'no-reflect')
cli += booleanArgs(jumbo, 'jumbo', 'no-jumbo')
cli += booleanArgs(fabric_guard, 'fabric-guard', 'no-fabric-guard')
cli += booleanArgs(local_switching, 'local-switching', 'no-local-switching')
cli += booleanArgs(defer_bringup, 'defer-bringup', 'no-defer-bringup')
run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| gpl-3.0 | 2,141,048,966,767,813,000 | 31.075718 | 92 | 0.591127 | false |
UOMx/edx-platform | lms/djangoapps/instructor/views/instructor_dashboard.py | 3 | 30702 | """
Instructor Dashboard Views
"""
import logging
import datetime
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
import uuid
import pytz
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from edxmako.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django.http import Http404, HttpResponseServerError
from django.conf import settings
from util.json_request import JsonResponse
from mock import patch
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from openedx.core.lib.xblock_utils import wrap_xblock
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR
from student.models import CourseEnrollment
from shoppingcart.models import Coupon, PaidCourseRegistration, CourseRegCodeItem
from course_modes.models import CourseMode, CourseModesArchive
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole
from certificates.models import (
CertificateGenerationConfiguration,
CertificateWhitelist,
GeneratedCertificate,
CertificateStatuses,
CertificateGenerationHistory,
CertificateInvalidation,
)
from certificates import api as certs_api
from util.date_utils import get_default_time_display
from class_dashboard.dashboard_data import get_section_display_name, get_array_section_has_problem
from .tools import get_units_with_due_date, title_or_url, bulk_email_is_enabled_for_course
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if settings.ANALYTICS_DASHBOARD_URL:
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = "<a href=\"{}\" target=\"_blank\">".format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = analytics_dashboard_message.format(
link_start=link_start, link_end="</a>", analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if bulk_email_is_enabled_for_course(course_key):
sections.append(_section_send_email(course, access))
    # Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, is_white_label))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
    # NOTE: For now, if we only have proctored exams enabled, then only platform Staff
# (user.is_staff) will be able to view the special exams tab. This may
# change in the future
can_see_special_exams = (
((course.enable_proctored_exams and request.user.is_staff) or course.enable_timed_exams) and
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False)
)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
certs_enabled = CertificateGenerationConfiguration.current().enabled
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidation_view_url = reverse( # pylint: disable=invalid-name
'certificate_invalidation_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course.id, course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
    Set the new course price and add a new entry in the CourseModesArchive table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_id': course_key,
'course_display_name': course.display_name,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': get_default_time_display(course.start),
'end_date': get_default_time_display(course.end) or _('No end date set'),
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if settings.ANALYTICS_DASHBOARD_URL:
dashboard_link = _get_dashboard_link(course_key)
message = _("Enrollment data is now available in {dashboard_link}.").format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
# section_data['offline_grades'] = offline_grades_available(course_key)
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'discussion_topics_url': reverse('cohort_discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = u"<a href=\"{0}\" target=\"_blank\">{1}</a>".format(analytics_dashboard_url,
settings.ANALYTICS_DASHBOARD_NAME)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': unicode(course.id),
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
| agpl-3.0 | -2,959,132,528,388,407,000 | 45.659574 | 128 | 0.669077 | false |
agentxan/plugin.video.emby | resources/lib/mutagen/easyid3.py | 38 | 15099 | # -*- coding: utf-8 -*-
# Copyright (C) 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
"""Easier access to ID3 tags.
EasyID3 is a wrapper around mutagen.id3.ID3 to make ID3 tags appear
more like Vorbis or APEv2 tags.
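
A minimal usage sketch (the file name below is just a placeholder)::

    from mutagen.easyid3 import EasyID3

    audio = EasyID3("example.mp3")
    audio["title"] = u"An Example Title"
    audio["artist"] = [u"An Example Artist"]
    audio.save()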
"""
import mutagen.id3
from ._compat import iteritems, text_type, PY2
from mutagen import Metadata
from mutagen._util import DictMixin, dict_match
from mutagen.id3 import ID3, error, delete, ID3FileType
__all__ = ['EasyID3', 'Open', 'delete']
class EasyID3KeyError(KeyError, ValueError, error):
"""Raised when trying to get/set an invalid key.
    Subclasses both KeyError and ValueError for API compatibility;
catching KeyError is preferred.
"""
class EasyID3(DictMixin, Metadata):
"""A file with an ID3 tag.
Like Vorbis comments, EasyID3 keys are case-insensitive ASCII
strings. Only a subset of ID3 frames are supported by default. Use
EasyID3.RegisterKey and its wrappers to support more.
You can also set the GetFallback, SetFallback, and DeleteFallback
to generic key getter/setter/deleter functions, which are called
if no specific handler is registered for a key. Additionally,
ListFallback can be used to supply an arbitrary list of extra
keys. These can be set on EasyID3 or on individual instances after
creation.
To use an EasyID3 class with mutagen.mp3.MP3::
from mutagen.mp3 import EasyMP3 as MP3
MP3(filename)
Because many of the attributes are constructed on the fly, things
like the following will not work::
ezid3["performer"].append("Joe")
Instead, you must do::
values = ezid3["performer"]
values.append("Joe")
ezid3["performer"] = values
"""
Set = {}
Get = {}
Delete = {}
List = {}
# For compatibility.
valid_keys = Get
GetFallback = None
SetFallback = None
DeleteFallback = None
ListFallback = None
@classmethod
def RegisterKey(cls, key,
getter=None, setter=None, deleter=None, lister=None):
"""Register a new key mapping.
A key mapping is four functions, a getter, setter, deleter,
and lister. The key may be either a string or a glob pattern.
        The getter, deleter, and lister receive an ID3 instance and
the requested key name. The setter also receives the desired
value, which will be a list of strings.
The getter, setter, and deleter are used to implement __getitem__,
__setitem__, and __delitem__.
The lister is used to implement keys(). It should return a
list of keys that are actually in the ID3 instance, provided
by its associated getter.
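        A rough sketch of registering a custom key (the frame choice is
        purely illustrative)::

            import mutagen.id3
            from mutagen.easyid3 import EasyID3

            def publisher_get(id3, key):
                return list(id3["TPUB"])

            def publisher_set(id3, key, value):
                id3.delall("TPUB")
                id3.add(mutagen.id3.TPUB(encoding=3, text=value))

            def publisher_delete(id3, key):
                del(id3["TPUB"])

            EasyID3.RegisterKey("publisher", publisher_get, publisher_set,
                                publisher_delete)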
"""
key = key.lower()
if getter is not None:
cls.Get[key] = getter
if setter is not None:
cls.Set[key] = setter
if deleter is not None:
cls.Delete[key] = deleter
if lister is not None:
cls.List[key] = lister
@classmethod
def RegisterTextKey(cls, key, frameid):
"""Register a text key.
If the key you need to register is a simple one-to-one mapping
of ID3 frame name to EasyID3 key, then you can use this
function::
EasyID3.RegisterTextKey("title", "TIT2")
"""
def getter(id3, key):
return list(id3[frameid])
def setter(id3, key, value):
try:
frame = id3[frameid]
except KeyError:
id3.add(mutagen.id3.Frames[frameid](encoding=3, text=value))
else:
frame.encoding = 3
frame.text = value
def deleter(id3, key):
del(id3[frameid])
cls.RegisterKey(key, getter, setter, deleter)
@classmethod
def RegisterTXXXKey(cls, key, desc):
"""Register a user-defined text frame key.
Some ID3 tags are stored in TXXX frames, which allow a
freeform 'description' which acts as a subkey,
e.g. TXXX:BARCODE.::
EasyID3.RegisterTXXXKey('barcode', 'BARCODE').
"""
frameid = "TXXX:" + desc
def getter(id3, key):
return list(id3[frameid])
def setter(id3, key, value):
try:
frame = id3[frameid]
except KeyError:
enc = 0
# Store 8859-1 if we can, per MusicBrainz spec.
for v in value:
if v and max(v) > u'\x7f':
enc = 3
break
id3.add(mutagen.id3.TXXX(encoding=enc, text=value, desc=desc))
else:
frame.text = value
def deleter(id3, key):
del(id3[frameid])
cls.RegisterKey(key, getter, setter, deleter)
def __init__(self, filename=None):
self.__id3 = ID3()
if filename is not None:
self.load(filename)
load = property(lambda s: s.__id3.load,
lambda s, v: setattr(s.__id3, 'load', v))
def save(self, *args, **kwargs):
# ignore v2_version until we support 2.3 here
kwargs.pop("v2_version", None)
self.__id3.save(*args, **kwargs)
delete = property(lambda s: s.__id3.delete,
lambda s, v: setattr(s.__id3, 'delete', v))
filename = property(lambda s: s.__id3.filename,
lambda s, fn: setattr(s.__id3, 'filename', fn))
size = property(lambda s: s.__id3.size,
lambda s, fn: setattr(s.__id3, 'size', s))
def __getitem__(self, key):
key = key.lower()
func = dict_match(self.Get, key, self.GetFallback)
if func is not None:
return func(self.__id3, key)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def __setitem__(self, key, value):
key = key.lower()
if PY2:
if isinstance(value, basestring):
value = [value]
else:
if isinstance(value, text_type):
value = [value]
func = dict_match(self.Set, key, self.SetFallback)
if func is not None:
return func(self.__id3, key, value)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def __delitem__(self, key):
key = key.lower()
func = dict_match(self.Delete, key, self.DeleteFallback)
if func is not None:
return func(self.__id3, key)
else:
raise EasyID3KeyError("%r is not a valid key" % key)
def keys(self):
keys = []
for key in self.Get.keys():
if key in self.List:
keys.extend(self.List[key](self.__id3, key))
elif key in self:
keys.append(key)
if self.ListFallback is not None:
keys.extend(self.ListFallback(self.__id3, ""))
return keys
def pprint(self):
"""Print tag key=value pairs."""
strings = []
for key in sorted(self.keys()):
values = self[key]
for value in values:
strings.append("%s=%s" % (key, value))
return "\n".join(strings)
Open = EasyID3
def genre_get(id3, key):
return id3["TCON"].genres
def genre_set(id3, key, value):
try:
frame = id3["TCON"]
except KeyError:
id3.add(mutagen.id3.TCON(encoding=3, text=value))
else:
frame.encoding = 3
frame.genres = value
def genre_delete(id3, key):
del(id3["TCON"])
def date_get(id3, key):
return [stamp.text for stamp in id3["TDRC"].text]
def date_set(id3, key, value):
id3.add(mutagen.id3.TDRC(encoding=3, text=value))
def date_delete(id3, key):
del(id3["TDRC"])
def original_date_get(id3, key):
return [stamp.text for stamp in id3["TDOR"].text]
def original_date_set(id3, key, value):
id3.add(mutagen.id3.TDOR(encoding=3, text=value))
def original_date_delete(id3, key):
del(id3["TDOR"])
def performer_get(id3, key):
people = []
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
raise KeyError(key)
for role, person in mcl.people:
if role == wanted_role:
people.append(person)
if people:
return people
else:
raise KeyError(key)
def performer_set(id3, key, value):
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
mcl = mutagen.id3.TMCL(encoding=3, people=[])
id3.add(mcl)
mcl.encoding = 3
people = [p for p in mcl.people if p[0] != wanted_role]
for v in value:
people.append((wanted_role, v))
mcl.people = people
def performer_delete(id3, key):
wanted_role = key.split(":", 1)[1]
try:
mcl = id3["TMCL"]
except KeyError:
raise KeyError(key)
people = [p for p in mcl.people if p[0] != wanted_role]
if people == mcl.people:
raise KeyError(key)
elif people:
mcl.people = people
else:
del(id3["TMCL"])
def performer_list(id3, key):
try:
mcl = id3["TMCL"]
except KeyError:
return []
else:
return list(set("performer:" + p[0] for p in mcl.people))
def musicbrainz_trackid_get(id3, key):
return [id3["UFID:http://musicbrainz.org"].data.decode('ascii')]
def musicbrainz_trackid_set(id3, key, value):
if len(value) != 1:
raise ValueError("only one track ID may be set per song")
value = value[0].encode('ascii')
try:
frame = id3["UFID:http://musicbrainz.org"]
except KeyError:
frame = mutagen.id3.UFID(owner="http://musicbrainz.org", data=value)
id3.add(frame)
else:
frame.data = value
def musicbrainz_trackid_delete(id3, key):
del(id3["UFID:http://musicbrainz.org"])
def website_get(id3, key):
urls = [frame.url for frame in id3.getall("WOAR")]
if urls:
return urls
else:
raise EasyID3KeyError(key)
def website_set(id3, key, value):
id3.delall("WOAR")
for v in value:
id3.add(mutagen.id3.WOAR(url=v))
def website_delete(id3, key):
id3.delall("WOAR")
def gain_get(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
raise EasyID3KeyError(key)
else:
return [u"%+f dB" % frame.gain]
def gain_set(id3, key, value):
if len(value) != 1:
raise ValueError(
"there must be exactly one gain value, not %r.", value)
gain = float(value[0].split()[0])
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
id3.add(frame)
frame.gain = gain
def gain_delete(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
pass
else:
if frame.peak:
frame.gain = 0.0
else:
del(id3["RVA2:" + key[11:-5]])
def peak_get(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
raise EasyID3KeyError(key)
else:
return [u"%f" % frame.peak]
def peak_set(id3, key, value):
if len(value) != 1:
raise ValueError(
"there must be exactly one peak value, not %r.", value)
peak = float(value[0])
if peak >= 2 or peak < 0:
raise ValueError("peak must be => 0 and < 2.")
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
frame = mutagen.id3.RVA2(desc=key[11:-5], gain=0, peak=0, channel=1)
id3.add(frame)
frame.peak = peak
def peak_delete(id3, key):
try:
frame = id3["RVA2:" + key[11:-5]]
except KeyError:
pass
else:
if frame.gain:
frame.peak = 0.0
else:
del(id3["RVA2:" + key[11:-5]])
def peakgain_list(id3, key):
keys = []
for frame in id3.getall("RVA2"):
keys.append("replaygain_%s_gain" % frame.desc)
keys.append("replaygain_%s_peak" % frame.desc)
return keys
for frameid, key in iteritems({
"TALB": "album",
"TBPM": "bpm",
"TCMP": "compilation", # iTunes extension
"TCOM": "composer",
"TCOP": "copyright",
"TENC": "encodedby",
"TEXT": "lyricist",
"TLEN": "length",
"TMED": "media",
"TMOO": "mood",
"TIT2": "title",
"TIT3": "version",
"TPE1": "artist",
"TPE2": "performer",
"TPE3": "conductor",
"TPE4": "arranger",
"TPOS": "discnumber",
"TPUB": "organization",
"TRCK": "tracknumber",
"TOLY": "author",
"TSO2": "albumartistsort", # iTunes extension
"TSOA": "albumsort",
"TSOC": "composersort", # iTunes extension
"TSOP": "artistsort",
"TSOT": "titlesort",
"TSRC": "isrc",
"TSST": "discsubtitle",
"TLAN": "language",
}):
EasyID3.RegisterTextKey(key, frameid)
EasyID3.RegisterKey("genre", genre_get, genre_set, genre_delete)
EasyID3.RegisterKey("date", date_get, date_set, date_delete)
EasyID3.RegisterKey("originaldate", original_date_get, original_date_set,
original_date_delete)
EasyID3.RegisterKey(
"performer:*", performer_get, performer_set, performer_delete,
performer_list)
EasyID3.RegisterKey("musicbrainz_trackid", musicbrainz_trackid_get,
musicbrainz_trackid_set, musicbrainz_trackid_delete)
EasyID3.RegisterKey("website", website_get, website_set, website_delete)
EasyID3.RegisterKey(
"replaygain_*_gain", gain_get, gain_set, gain_delete, peakgain_list)
EasyID3.RegisterKey("replaygain_*_peak", peak_get, peak_set, peak_delete)
# At various times, information for this came from
# http://musicbrainz.org/docs/specs/metadata_tags.html
# http://bugs.musicbrainz.org/ticket/1383
# http://musicbrainz.org/doc/MusicBrainzTag
for desc, key in iteritems({
u"MusicBrainz Artist Id": "musicbrainz_artistid",
u"MusicBrainz Album Id": "musicbrainz_albumid",
u"MusicBrainz Album Artist Id": "musicbrainz_albumartistid",
u"MusicBrainz TRM Id": "musicbrainz_trmid",
u"MusicIP PUID": "musicip_puid",
u"MusicMagic Fingerprint": "musicip_fingerprint",
u"MusicBrainz Album Status": "musicbrainz_albumstatus",
u"MusicBrainz Album Type": "musicbrainz_albumtype",
u"MusicBrainz Album Release Country": "releasecountry",
u"MusicBrainz Disc Id": "musicbrainz_discid",
u"ASIN": "asin",
u"ALBUMARTISTSORT": "albumartistsort",
u"BARCODE": "barcode",
u"CATALOGNUMBER": "catalognumber",
u"MusicBrainz Release Track Id": "musicbrainz_releasetrackid",
u"MusicBrainz Release Group Id": "musicbrainz_releasegroupid",
u"MusicBrainz Work Id": "musicbrainz_workid",
u"Acoustid Fingerprint": "acoustid_fingerprint",
u"Acoustid Id": "acoustid_id",
}):
EasyID3.RegisterTXXXKey(key, desc)
class EasyID3FileType(ID3FileType):
"""Like ID3FileType, but uses EasyID3 for tags."""
ID3 = EasyID3
| gpl-2.0 | 6,081,326,306,571,183,000 | 27.275281 | 78 | 0.592026 | false |
AICP/external_chromium_org | tools/deep_memory_profiler/subcommands/upload.py | 123 | 2545 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import tempfile
import zipfile
from lib.subcommand import SubCommand
from lib.symbol import SymbolDataSources
LOGGER = logging.getLogger('dmprof')
class UploadCommand(SubCommand):
def __init__(self):
super(UploadCommand, self).__init__(
'Usage: %prog upload [--gsutil path/to/gsutil] '
'<first-dump> <destination-gs-path>')
self._parser.add_option('--gsutil', default='gsutil',
help='path to GSUTIL', metavar='GSUTIL')
def do(self, sys_argv):
options, args = self._parse_args(sys_argv, 2)
dump_path = args[1]
gs_path = args[2]
dump_files = SubCommand._find_all_dumps(dump_path)
bucket_files = SubCommand._find_all_buckets(dump_path)
prefix = SubCommand._find_prefix(dump_path)
symbol_data_sources = SymbolDataSources(prefix)
symbol_data_sources.prepare()
symbol_path = symbol_data_sources.path()
handle_zip, filename_zip = tempfile.mkstemp('.zip', 'dmprof')
os.close(handle_zip)
try:
file_zip = zipfile.ZipFile(filename_zip, 'w', zipfile.ZIP_DEFLATED)
for filename in dump_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
for filename in bucket_files:
file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
symbol_basename = os.path.basename(os.path.abspath(symbol_path))
for filename in os.listdir(symbol_path):
if not filename.startswith('.'):
file_zip.write(os.path.join(symbol_path, filename),
os.path.join(symbol_basename, os.path.basename(
os.path.abspath(filename))))
file_zip.close()
returncode = UploadCommand._run_gsutil(
options.gsutil, 'cp', '-a', 'public-read', filename_zip, gs_path)
finally:
os.remove(filename_zip)
return returncode
@staticmethod
def _run_gsutil(gsutil, *args):
"""Run gsutil as a subprocess.
Args:
*args: Arguments to pass to gsutil. The first argument should be an
operation such as ls, cp or cat.
Returns:
The return code from the process.
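    Example (paths are illustrative only):
      UploadCommand._run_gsutil('gsutil', 'cp', 'dmprof.zip', 'gs://bucket/dmprof.zip')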
"""
command = [gsutil] + list(args)
LOGGER.info("Running: %s", command)
try:
return subprocess.call(command)
except OSError, e:
LOGGER.error('Error to run gsutil: %s', e)
| bsd-3-clause | 6,664,372,673,349,126,000 | 31.21519 | 77 | 0.649902 | false |
Endika/OpenUpgrade | addons/sale_journal/__openerp__.py | 262 | 2637 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoicing Journals',
'version': '1.0',
'category': 'Sales Management',
'description': """
The sales journal module allows you to categorise your sales and deliveries (picking lists) between different journals.
==========================================================================================================================
This module is very helpful for bigger companies that work by departments.
You can use journals for different purposes, some examples:
--------------------------------------------------------------
* isolate sales of different departments
* journals for deliveries by truck or by UPS
Journals have a responsible person and evolve between different statuses:
----------------------------------------------------------------------------
* draft, open, cancel, done.
Batch operations can be processed on the different journals to confirm all sales
at once, or to validate or invoice packings.
It also supports batch invoicing methods that can be configured per partner and sales order, for example:
-------------------------------------------------------------------------------------------------------------
* daily invoicing
* monthly invoicing
Some statistics by journal are provided.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['sale_stock'],
'data': [
'security/ir.model.access.csv',
'sale_journal_view.xml',
'sale_journal_data.xml'
],
'demo': ['sale_journal_demo.xml'],
'test': [ ],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 9,167,711,087,034,690,000 | 40.203125 | 120 | 0.566932 | false |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/sphinxext/plot_directive.py | 1 | 28321 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` option is
specified, the context is reset for this and future plots, and
previous figures are closed prior to running the code.
        ``:context: close-figs`` keeps the context but closes previous figures
before running the code.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
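
For example, two plot directives can share state via the ``:context:``
option (a small illustrative sketch)::

    .. plot::
        :context:

        import numpy as np
        x = np.linspace(0, 2 * np.pi, 100)

    .. plot::
        :context: close-figs

        import matplotlib.pyplot as plt
        plt.plot(x, np.sin(x))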
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
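
A typical ``conf.py`` fragment using some of these options might look like
this (the values are purely illustrative)::

    extensions = ['matplotlib.sphinxext.plot_directive']
    plot_include_source = True
    plot_html_show_source_link = False
    plot_formats = [('png', 100), 'pdf']
    plot_rcparams = {'font.size': 9}

The same list syntax can also be given on the ``sphinx-build`` command line,
e.g. ``-D plot_formats=png:100,pdf:200``.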
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
import warnings
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
try:
with warnings.catch_warnings(record=True):
warnings.simplefilter("error", UserWarning)
matplotlib.use('Agg')
except UserWarning:
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
else:
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
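    """Return True if *text* appears to contain doctest examples.

    A rough sketch of the expected behaviour::

        contains_doctest("x = 1")        # False -- valid Python as-is
        contains_doctest(">>> x = 1")    # True  -- doctest prompt found
    """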
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
sub_re = re.compile("^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
    Returns True if *derived* is out-of-date with respect to *original*,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
Import a Python module from a path, and run the function given by
name, if function_name is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
    # Assign a do-nothing print function to the namespace. There
    # doesn't seem to be any other way to suppress printing in a manner
    # that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
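# Minimal usage sketch for run_code() (not part of the original module, and
# assuming the Sphinx setup() hook has already populated setup.config):
#
#   ns = run_code("plt.plot(np.arange(10))", code_path=None)
#
# The snippet runs in a namespace pre-seeded (when plot_pre_code is unset)
# with numpy as np and pyplot as plt; any figures it opens stay registered
# with matplotlib and are harvested afterwards by render_figures() via
# Gcf.get_all_fig_managers().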
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False,
close_figs=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
        # Sphinx < 1.3 passes plot_formats through as a single string;
        # split on ',' to mimic Sphinx 1.3 and later, which always
        # returns a list.
plot_formats = plot_formats.split(',')
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
if ':' in fmt:
suffix,dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif type(fmt) in (tuple, list) and len(fmt)==2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
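    # For reference (an illustrative note, not part of the original module),
    # all of the following conf.py values are accepted by the loop above and
    # normalize to (suffix, dpi) pairs:
    #
    #   plot_formats = "png,hires.png,pdf"           # plain names, default DPI
    #   plot_formats = "png:100,pdf:250"             # explicit DPI per format
    #   plot_formats = [("png", 100), ("pdf", 250)]  # already-normalized pairs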
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
plot_context.clear()
close_figs = not context or close_figs
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close_figs)
elif close_figs:
plt.close('all')
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and not nofigs,
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| mit | 4,961,335,879,209,633,000 | 32.046674 | 81 | 0.565234 | false |
vpelletier/neoppod | neo/storage/app.py | 1 | 13647 | #
# Copyright (C) 2006-2016 Nexedi SA
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from collections import deque
from neo.lib import logging
from neo.lib.app import BaseApplication
from neo.lib.protocol import uuid_str, \
CellStates, ClusterStates, NodeTypes, Packets
from neo.lib.node import NodeManager
from neo.lib.connection import ListeningConnection
from neo.lib.exception import StoppedOperation, PrimaryFailure
from neo.lib.pt import PartitionTable
from neo.lib.util import dump
from neo.lib.bootstrap import BootstrapManager
from .checker import Checker
from .database import buildDatabaseManager
from .exception import AlreadyPendingError
from .handlers import identification, initialization
from .handlers import master, hidden
from .replicator import Replicator
from .transactions import TransactionManager
from neo.lib.debug import register as registerLiveDebugger
class Application(BaseApplication):
"""The storage node application."""
def __init__(self, config):
super(Application, self).__init__(
config.getSSL(), config.getDynamicMasterList())
# set the cluster name
self.name = config.getCluster()
self.tm = TransactionManager(self)
self.dm = buildDatabaseManager(config.getAdapter(),
(config.getDatabase(), config.getEngine(), config.getWait()),
)
# load master nodes
for master_address in config.getMasters():
self.nm.createMaster(address=master_address)
# set the bind address
self.server = config.getBind()
logging.debug('IP address is %s, port is %d', *self.server)
# The partition table is initialized after getting the number of
# partitions.
self.pt = None
self.checker = Checker(self)
self.replicator = Replicator(self)
self.listening_conn = None
self.master_conn = None
self.master_node = None
# operation related data
self.event_queue = None
self.event_queue_dict = None
self.operational = False
        # ready is True when operational and all information has been received
self.ready = False
self.dm.setup(reset=config.getReset())
self.loadConfiguration()
# force node uuid from command line argument, for testing purpose only
if config.getUUID() is not None:
self.uuid = config.getUUID()
registerLiveDebugger(on_log=self.log)
def close(self):
self.listening_conn = None
self.dm.close()
super(Application, self).close()
def _poll(self):
self.em.poll(1)
def log(self):
self.em.log()
self.logQueuedEvents()
self.nm.log()
self.tm.log()
if self.pt is not None:
self.pt.log()
def loadConfiguration(self):
"""Load persistent configuration data from the database.
If data is not present, generate it."""
dm = self.dm
# check cluster name
name = dm.getName()
if name is None:
dm.setName(self.name)
elif name != self.name:
raise RuntimeError('name %r does not match with the database: %r'
% (self.name, name))
# load configuration
self.uuid = dm.getUUID()
num_partitions = dm.getNumPartitions()
num_replicas = dm.getNumReplicas()
ptid = dm.getPTID()
# check partition table configuration
if num_partitions is not None and num_replicas is not None:
if num_partitions <= 0:
raise RuntimeError, 'partitions must be more than zero'
# create a partition table
self.pt = PartitionTable(num_partitions, num_replicas)
logging.info('Configuration loaded:')
logging.info('UUID : %s', uuid_str(self.uuid))
logging.info('PTID : %s', dump(ptid))
logging.info('Name : %s', self.name)
logging.info('Partitions: %s', num_partitions)
logging.info('Replicas : %s', num_replicas)
def loadPartitionTable(self):
"""Load a partition table from the database."""
ptid = self.dm.getPTID()
cell_list = self.dm.getPartitionTable()
new_cell_list = []
for offset, uuid, state in cell_list:
# convert from int to Enum
state = CellStates[state]
# register unknown nodes
if self.nm.getByUUID(uuid) is None:
self.nm.createStorage(uuid=uuid)
new_cell_list.append((offset, uuid, state))
# load the partition table in manager
self.pt.clear()
self.pt.update(ptid, new_cell_list, self.nm)
def run(self):
try:
self._run()
except Exception:
logging.exception('Pre-mortem data:')
self.log()
logging.flush()
raise
def _run(self):
"""Make sure that the status is sane and start a loop."""
if len(self.name) == 0:
raise RuntimeError, 'cluster name must be non-empty'
# Make a listening port
handler = identification.IdentificationHandler(self)
self.listening_conn = ListeningConnection(self, handler, self.server)
self.server = self.listening_conn.getAddress()
# Connect to a primary master node, verify data, and
# start the operation. This cycle will be executed permanently,
# until the user explicitly requests a shutdown.
while True:
self.cluster_state = None
self.ready = False
self.operational = False
if self.master_node is None:
# look for the primary master
self.connectToPrimary()
# check my state
node = self.nm.getByUUID(self.uuid)
if node is not None and node.isHidden():
self.wait()
# drop any client node
for conn in self.em.getConnectionList():
if conn not in (self.listening_conn, self.master_conn):
conn.close()
# create/clear event queue
self.event_queue = deque()
self.event_queue_dict = {}
try:
self.initialize()
self.doOperation()
raise RuntimeError, 'should not reach here'
except StoppedOperation, msg:
logging.error('operation stopped: %s', msg)
except PrimaryFailure, msg:
logging.error('primary master is down: %s', msg)
finally:
self.checker = Checker(self)
def connectToPrimary(self):
"""Find a primary master node, and connect to it.
If a primary master node is not elected or ready, repeat
the attempt of a connection periodically.
Note that I do not accept any connection from non-master nodes
at this stage."""
pt = self.pt
# First of all, make sure that I have no connection.
for conn in self.em.getConnectionList():
if not conn.isListening():
conn.close()
# search, find, connect and identify to the primary master
bootstrap = BootstrapManager(self, self.name,
NodeTypes.STORAGE, self.uuid, self.server)
data = bootstrap.getPrimaryConnection()
(node, conn, uuid, num_partitions, num_replicas) = data
self.master_node = node
self.master_conn = conn
logging.info('I am %s', uuid_str(uuid))
self.uuid = uuid
self.dm.setUUID(uuid)
# Reload a partition table from the database. This is necessary
# when a previous primary master died while sending a partition
# table, because the table might be incomplete.
if pt is not None:
self.loadPartitionTable()
if num_partitions != pt.getPartitions():
raise RuntimeError('the number of partitions is inconsistent')
if pt is None or pt.getReplicas() != num_replicas:
# changing number of replicas is not an issue
self.dm.setNumPartitions(num_partitions)
self.dm.setNumReplicas(num_replicas)
self.pt = PartitionTable(num_partitions, num_replicas)
self.loadPartitionTable()
def initialize(self):
logging.debug('initializing...')
_poll = self._poll
self.master_conn.setHandler(initialization.InitializationHandler(self))
while not self.operational:
_poll()
self.ready = True
self.replicator.populate()
self.master_conn.notify(Packets.NotifyReady())
def doOperation(self):
"""Handle everything, including replications and transactions."""
logging.info('doing operation')
poll = self._poll
_poll = self.em._poll
isIdle = self.em.isIdle
handler = master.MasterOperationHandler(self)
self.master_conn.setHandler(handler)
# Forget all unfinished data.
self.dm.dropUnfinishedData()
self.tm.reset()
self.task_queue = task_queue = deque()
try:
self.dm.doOperation(self)
while True:
while task_queue:
try:
while isIdle():
if task_queue[-1].next():
_poll(0)
task_queue.rotate()
break
except StopIteration:
task_queue.pop()
poll()
finally:
del self.task_queue
# XXX: Although no handled exception should happen between
# replicator.populate() and the beginning of this 'try'
# clause, the replicator should be reset in a safer place.
self.replicator = Replicator(self)
# Abort any replication, whether we are feeding or out-of-date.
for node in self.nm.getStorageList(only_identified=True):
node.getConnection().close()
def changeClusterState(self, state):
self.cluster_state = state
if state == ClusterStates.STOPPING_BACKUP:
self.replicator.stop()
def wait(self):
# change handler
logging.info("waiting in hidden state")
_poll = self._poll
handler = hidden.HiddenHandler(self)
for conn in self.em.getConnectionList():
conn.setHandler(handler)
node = self.nm.getByUUID(self.uuid)
while True:
_poll()
if not node.isHidden():
break
def queueEvent(self, some_callable, conn=None, args=(), key=None,
raise_on_duplicate=True):
event_queue_dict = self.event_queue_dict
n = event_queue_dict.get(key)
if n and raise_on_duplicate:
raise AlreadyPendingError()
msg_id = None if conn is None else conn.getPeerId()
self.event_queue.append((key, some_callable, msg_id, conn, args))
if key is not None:
event_queue_dict[key] = n + 1 if n else 1
def executeQueuedEvents(self):
p = self.event_queue.popleft
event_queue_dict = self.event_queue_dict
for _ in xrange(len(self.event_queue)):
key, some_callable, msg_id, conn, args = p()
if key is not None:
n = event_queue_dict[key] - 1
if n:
event_queue_dict[key] = n
else:
del event_queue_dict[key]
if conn is None:
some_callable(*args)
elif not conn.isClosed():
orig_msg_id = conn.getPeerId()
try:
conn.setPeerId(msg_id)
some_callable(conn, *args)
finally:
conn.setPeerId(orig_msg_id)
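    # Illustrative note (not part of the original code): the two methods above
    # implement a simple deferred-call pattern.  A handler that cannot serve a
    # request yet does something like
    #
    #   app.queueEvent(self.askObject, conn, args=(oid, serial), key=oid)
    #
    # and once the missing data arrives, app.executeQueuedEvents() replays the
    # callable with the original connection and message id restored, so the
    # reply still reaches the right peer.  (askObject/oid/serial are
    # hypothetical names used only for this sketch.)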
def logQueuedEvents(self):
if self.event_queue is None:
return
logging.info("Pending events:")
for key, event, _msg_id, _conn, args in self.event_queue:
            logging.info('  %r:%r: %r:%r %r', key, event.__name__,
                _msg_id, _conn, args)
def newTask(self, iterator):
try:
iterator.next()
except StopIteration:
return
self.task_queue.appendleft(iterator)
def closeClient(self, connection):
if connection is not self.replicator.getCurrentConnection() and \
connection not in self.checker.conn_dict:
connection.closeClient()
def shutdown(self, erase=False):
"""Close all connections and exit"""
for c in self.em.getConnectionList():
try:
c.close()
except PrimaryFailure:
pass
# clear database to avoid polluting the cluster at restart
if erase:
self.dm.erase()
logging.info("Application has been asked to shut down")
sys.exit()
| gpl-2.0 | -997,196,639,005,123,000 | 35.103175 | 79 | 0.591119 | false |
sq5bpf/osmo-tetra-sq5bpf | src/demod/python-3.6/usrp1-tetra_demod.py | 9 | 5194 | #!/usr/bin/env python
import sys
import math
from gnuradio import gr, gru, audio, eng_notation, blks2, optfir
from gnuradio import usrp
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
import cqpsk
except:
from tetra_demod import cqpsk
# applies frequency translation, resampling (interpolation/decimation) and cqpsk demodulation
class my_top_block(gr.top_block):
def __init__(self, options):
gr.top_block.__init__(self)
fusb_block_size = gr.prefs().get_long('fusb', 'block_size', 4096)
fusb_nblocks = gr.prefs().get_long('fusb', 'nblocks', 16)
self._u = usrp.source_c(decim_rate=options.decim, fusb_block_size=fusb_block_size, fusb_nblocks=fusb_nblocks)
# master clock
if options.fpga_freq is not None:
self._u.set_fpga_master_clock_freq(long(options.fpga_freq))
        # default subdev if user didn't pick one
        if options.rx_subdev_spec is None:
            if self._u.db(0, 0).dbid() >= 0:
                options.rx_subdev_spec = (0, 0)
            elif self._u.db(1, 0).dbid() >= 0:
options.rx_subdev_spec = (1, 0)
else:
options.rx_subdev_spec = (0, 0)
# configure usrp mux
self._u.set_mux(usrp.determine_rx_mux_value(self._u, options.rx_subdev_spec))
# determine the daughterboard subdevice
self.subdev = usrp.selected_subdev(self._u, options.rx_subdev_spec)
# select antenna
if options.antenna is not None:
print "Selecting antenna %s" % (options.antenna,)
self.subdev.select_rx_antenna(options.antenna)
# set initial values
if options.gain is None:
# if no gain was specified, use the mid-point in dB
g = self.subdev.gain_range()
options.gain = float(g[0]+g[1])/2
r = self._u.tune(0, self.subdev, options.freq)
self.subdev.set_gain(options.gain)
#sample_rate = options.fpga_clock/options.decim
sample_rate = self._u.adc_freq() / self._u.decim_rate()
symbol_rate = 18000
sps = 2
# output rate will be 36,000
ntaps = 11 * sps
new_sample_rate = symbol_rate * sps
channel_taps = gr.firdes.low_pass(1.0, sample_rate, options.low_pass, options.low_pass * 0.1, gr.firdes.WIN_HANN)
FILTER = gr.freq_xlating_fir_filter_ccf(1, channel_taps, options.calibration, sample_rate)
sys.stderr.write("sample rate: %d\n" %(sample_rate))
DEMOD = cqpsk.cqpsk_demod( samples_per_symbol = sps,
excess_bw=0.35,
costas_alpha=0.03,
gain_mu=0.05,
mu=0.05,
omega_relative_limit=0.05,
log=options.log,
verbose=options.verbose)
OUT = gr.file_sink(gr.sizeof_float, options.output_file)
r = float(sample_rate) / float(new_sample_rate)
INTERPOLATOR = gr.fractional_interpolator_cc(0, r)
self.connect(self._u, FILTER, INTERPOLATOR, DEMOD, OUT)
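# Worked example of the rate conversion above (illustrative only; the exact
# numbers depend on the USRP in use).  Assuming the USRP1's 64 MS/s ADC and
# the default --decim 250:
#
#   sample_rate     = 64e6 / 250           = 256000 S/s
#   new_sample_rate = 18000 symbols/s * 2  =  36000 S/s
#   r               = 256000 / 36000       ~  7.111
#
# The fractional interpolator therefore consumes ~7.1 input samples per
# output sample, delivering the 36 kS/s (2 samples/symbol) stream that the
# cqpsk demodulator expects.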
def get_options():
parser = OptionParser(option_class=eng_option)
# usrp related settings
parser.add_option("-d", "--decim", type="int", default=250,
help="Set USRP decimation rate to DECIM [default=%default]")
parser.add_option("-f", "--freq", type="eng_float", default=None,
help="set frequency to FREQ", metavar="FREQ")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=None,
help="Select USRP Rx side A or B (default=first one with a daughterboard)")
parser.add_option("-A", "--antenna", default=None,
help="select Rx Antenna")
parser.add_option("-F", "--fpga-freq", type="eng_float", default=None,
help="set USRP reference clock frequency to FPGA_FREQ", metavar="FPGA_FREQ")
# demodulator related settings
parser.add_option("-c", "--calibration", type="int", default=0, help="freq offset")
parser.add_option("-l", "--log", action="store_true", default=False, help="dump debug .dat files")
parser.add_option("-L", "--low-pass", type="eng_float", default=25e3, help="low pass cut-off", metavar="Hz")
parser.add_option("-o", "--output-file", type="string", default="out.float", help="specify the bit output file")
parser.add_option("-v", "--verbose", action="store_true", default=False, help="dump demodulation data")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
raise SystemExit, 1
if options.freq is None:
parser.print_help()
sys.stderr.write('You must specify the frequency with -f FREQ\n');
raise SystemExit, 1
return (options)
if __name__ == "__main__":
(options) = get_options()
tb = my_top_block(options)
try:
tb.run()
except KeyboardInterrupt:
tb.stop()
| agpl-3.0 | -417,426,617,710,753,860 | 38.052632 | 121 | 0.585868 | false |
sontek/rethinkdb | external/v8_3.30.33.16/tools/push-to-trunk/chromium_roll.py | 40 | 4767 | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import sys
from common_includes import *
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
# Update v8 remote tracking branches.
self.GitFetchOrigin()
class DetectLastPush(Step):
MESSAGE = "Detect commit ID of last push to trunk."
def RunStep(self):
self["last_push"] = self._options.last_push or self.FindLastTrunkPush(
branch="origin/candidates", include_patches=True)
self["push_title"] = self.GitLog(n=1, format="%s",
git_hash=self["last_push"])
class SwitchChromium(Step):
MESSAGE = "Switch to Chromium checkout."
def RunStep(self):
self["v8_path"] = os.getcwd()
cwd = self._options.chromium
os.chdir(cwd)
self.InitialEnvironmentChecks(cwd)
# Check for a clean workdir.
if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Assert that the DEPS file is there.
if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
self.Die("DEPS file not present.")
class UpdateChromiumCheckout(Step):
MESSAGE = "Update the checkout and create a new branch."
def RunStep(self):
self.GitCheckout("master", cwd=self._options.chromium)
self.Command("gclient", "sync --nohooks", cwd=self._options.chromium)
self.GitPull(cwd=self._options.chromium)
# Update v8 remotes.
self.GitFetchOrigin()
self.GitCreateBranch("v8-roll-%s" % self["last_push"],
cwd=self._options.chromium)
class UploadCL(Step):
MESSAGE = "Create and upload CL."
def RunStep(self):
# Patch DEPS file.
if self.Command(
"roll-dep", "v8 %s" % self["last_push"],
cwd=self._options.chromium) is None:
self.Die("Failed to create deps for %s" % self["last_push"])
commit_title = "Update V8 to %s." % self["push_title"].lower()
sheriff = ""
if self["sheriff"]:
sheriff = ("\n\nPlease reply to the V8 sheriff %s in case of problems."
% self["sheriff"])
self.GitCommit("%s%s\n\nTBR=%s" %
(commit_title, sheriff, self._options.reviewer),
author=self._options.author,
cwd=self._options.chromium)
if not self._options.dry_run:
self.GitUpload(author=self._options.author,
force=True,
cq=self._options.use_commit_queue,
cwd=self._options.chromium)
print "CL uploaded."
else:
self.GitCheckout("master", cwd=self._options.chromium)
self.GitDeleteBranch("v8-roll-%s" % self["last_push"],
cwd=self._options.chromium)
print "Dry run - don't upload."
# TODO(machenbach): Make this obsolete. We are only in the chromium checkout
# for the initial .git check.
class SwitchV8(Step):
MESSAGE = "Returning to V8 checkout."
def RunStep(self):
os.chdir(self["v8_path"])
class CleanUp(Step):
MESSAGE = "Done!"
def RunStep(self):
print("Congratulations, you have successfully rolled %s into "
"Chromium. Please don't forget to update the v8rel spreadsheet."
% self["last_push"])
# Clean up all temporary files.
Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
class ChromiumRoll(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
parser.add_argument("-l", "--last-push",
help="The git commit ID of the last push to trunk.")
parser.add_argument("--use-commit-queue",
help="Check the CQ bit on upload.",
default=False, action="store_true")
def _ProcessOptions(self, options): # pragma: no cover
if not options.author or not options.reviewer:
print "A reviewer (-r) and an author (-a) are required."
return False
options.requires_editor = False
options.force = True
options.manual = False
return True
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
}
def _Steps(self):
return [
Preparation,
DetectLastPush,
DetermineV8Sheriff,
SwitchChromium,
UpdateChromiumCheckout,
UploadCL,
SwitchV8,
CleanUp,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(ChromiumRoll().Run())
| agpl-3.0 | 6,378,737,579,575,633,000 | 29.754839 | 77 | 0.618209 | false |
JonathanBennett/marathon-metric-forwarder | node_modules/node-gyp/gyp/pylib/gyp/msvs_emulation.py | 1407 | 47697 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyp.common import OrderedSet
import gyp.MSVSUtil
import gyp.MSVSVersion
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
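# Illustrative example (not part of the original module) of the recursive
# lookup above:
#
#   >>> _GenericRetrieve({'a': {'b': 'c'}}, 'default', ['a', 'b'])
#   'c'
#   >>> _GenericRetrieve({'a': {}}, 'default', ['a', 'b'])
#   'default'
#
# This is how _Setting() walks the msvs_settings dicts with paths such as
# ('VCLinkerTool', 'OutputFile'), falling back to a default when any key
# along the path is missing.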
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not callable(map):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
element = filter(None, [map(elem) for elem in element])
else:
element = map(element)
return element
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.iteritems():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in configs.values():
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
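  # Illustrative note (not part of the original code): given a target named
  # "foo" built into "obj/bar", ConvertVSMacros() performs substitutions such
  # as
  #
  #   '$(OutDir)\$(TargetFileName)' -> 'obj/bar\foo.dll'
  #   '$(InputPath)'                -> '${source}'
  #
  # (the exact extension comes from GetExtension(), and ${source} /
  # $!INTERMEDIATE_DIR are placeholders expanded later by the ninja
  # generator).  The target name and paths above are hypothetical.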
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
# There's two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
def GetMapFileName(self, config, expand_special):
"""Gets the explicitly overriden map file name for a target or returns None
if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = filter(lambda x: not x.startswith('/MP'), cflags)
return cflags
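  # Illustrative note (not part of the original code): the cl(...) calls above
  # use the _GetWrapper helper, so a configuration containing
  #
  #   'VCCLCompilerTool': {'Optimization': '2', 'WarningLevel': '4'}
  #
  # contributes '/O2' and '/W4' to the returned cflags list: the stored value
  # is passed through the optional 'map' and then glued onto 'prefix'.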
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
stack_reserve_size = self._Setting(
('VCLinkerTool', 'StackReserveSize'), config, default='')
if stack_reserve_size:
stack_commit_size = self._Setting(
('VCLinkerTool', 'StackCommitSize'), config, default='')
if stack_commit_size:
stack_commit_size = ',' + stack_commit_size
ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = filter(lambda x: 'DYNAMICBASE' in x or x == '/FIXED',
ldflags)
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not filter(lambda x: 'NXCOMPAT' in x, ldflags):
ldflags.append('/NXCOMPAT')
have_def_file = filter(lambda x: x.startswith('/DEF:'), ldflags)
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyp.common.EnsureDirExists(build_dir_generated_name)
f = gyp.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', map(expand_special, cflags_cc + pch_output))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', map(expand_special, cflags_c + pch_output))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.iteritems():
assert '$(' not in new, new
string = string.replace(old, new)
return string
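# A hedged usage sketch of ExpandMacros, not called anywhere in this module;
# the expansion values are assumed for illustration only, since real ones come
# from MsvsSettings.GetVSMacroEnv.
def _ExpandMacrosExample():
  expansions = {'$(OutDir)': 'out\\Release', '$(TargetName)': 'foo'}
  # Yields 'out\\Release\\foo.dll'; strings without '$' pass through unchanged.
  return ExpandMacros('$(OutDir)\\$(TargetName).dll', expansions)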
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
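# A rough sketch of what _ExtractImportantEnvironment consumes and returns.
# The 'set' dump below is fabricated for illustration (real input comes from
# running the VS setup batch file followed by 'set') and this helper is never
# called by the generator.
def _ExtractImportantEnvironmentExample():
  fake_set_output = ('PATH=C:\\bin\n'
                     'INCLUDE=C:\\inc\n'
                     'SYSTEMROOT=C:\\Windows\n'
                     'TEMP=C:\\t\n'
                     'TMP=C:\\t\n'
                     'UNRELATED=ignored')
  # Only variables named in envvars_to_save survive (keys upper-cased), and
  # the running python's directory is prepended to PATH.
  return _ExtractImportantEnvironment(fake_set_output)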
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.iteritems():
block += key + '=' + value + nul
block += nul
return block
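# Hedged illustration of the block layout above, using a single assumed key so
# the ordering is deterministic; CreateProcess expects exactly this
# key=value NUL ... NUL shape.
def _EnvironmentBlockExample():
  block = _FormatAsEnvironmentBlock({'TEMP': 'C:\\tmp'})
  # block == 'TEMP=C:\\tmp\x00\x00'
  return block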
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure to generate environment files does not
  meet your requirements (e.g. for custom toolchains), you can pass
  "-G ninja_use_custom_environment_files" to gyp to suppress file
generation and use custom environment files prepared by yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyp files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = filter(lambda x: '$' not in x, sources)
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = filter(lambda x: not os.path.exists(x), relative)
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
| mit | 2,190,254,302,342,393,600 | 42.879485 | 80 | 0.657379 | false |
wileeam/airflow | airflow/migrations/versions/cf5dc11e79ad_drop_user_and_chart.py | 7 | 3768 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""drop_user_and_chart
Revision ID: cf5dc11e79ad
Revises: 41f5f12752f8
Create Date: 2019-01-24 15:30:35.834740
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = 'cf5dc11e79ad'
down_revision = '41f5f12752f8'
branch_labels = None
depends_on = None
def upgrade():
    # We previously had a KnownEvent's table, but we deleted the table without
    # a down migration to remove it (so we didn't delete anyone's data if they
    # happened to be using the feature).
#
# But before we can delete the users table we need to drop the FK
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
    if 'known_event' in inspector.get_table_names() and conn.dialect.name != 'sqlite':
op.drop_constraint('known_event_user_id_fkey', 'known_event')
op.drop_table("chart")
op.drop_table("users")
def downgrade():
conn = op.get_bind()
op.create_table(
'users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=250), nullable=True),
sa.Column('email', sa.String(length=500), nullable=True),
sa.Column('password', sa.String(255)),
sa.Column('superuser', sa.Boolean(), default=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_table(
'chart',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('label', sa.String(length=200), nullable=True),
sa.Column('conn_id', sa.String(length=250), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('chart_type', sa.String(length=100), nullable=True),
sa.Column('sql_layout', sa.String(length=50), nullable=True),
sa.Column('sql', sa.Text(), nullable=True),
sa.Column('y_log_scale', sa.Boolean(), nullable=True),
sa.Column('show_datatable', sa.Boolean(), nullable=True),
sa.Column('show_sql', sa.Boolean(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('default_params', sa.String(length=5000), nullable=True),
sa.Column('x_is_date', sa.Boolean(), nullable=True),
sa.Column('iteration_no', sa.Integer(), nullable=True),
sa.Column('last_modified', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
if conn.dialect.name == 'mysql':
conn.execute("SET time_zone = '+00:00'")
op.alter_column(table_name='chart', column_name='last_modified', type_=mysql.TIMESTAMP(fsp=6))
else:
if conn.dialect.name in ('sqlite', 'mssql'):
return
if conn.dialect.name == 'postgresql':
conn.execute("set timezone=UTC")
op.alter_column(table_name='chart', column_name='last_modified', type_=sa.TIMESTAMP(timezone=True))
| apache-2.0 | -4,182,711,032,820,492,000 | 37.060606 | 107 | 0.667728 | false |
etnestad/xcsoar | tools/gdb.py | 25 | 7308 | #
# XCSoar Glide Computer - http://www.xcsoar.org/
# Copyright (C) 2000-2012 The XCSoar Project
# A detailed list of copyright holders can be found in the file "AUTHORS".
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#
# This is a gdb module that aids debugging XCSoar. To load it, launch
# gdb and type:
#
# source gdb.py
#
import gdb
double_type = gdb.lookup_type('double')
string_type = gdb.lookup_type('char').pointer()
def fixed_value(f):
"""Extracts the floating point value of a 'fixed' instance."""
if f.type.unqualified().strip_typedefs().code == double_type.code:
return float(f.cast(double_type))
else:
return long(f['m_nVal']) / (1. * (1 << 28))
def angle_value(a):
return fixed_value(a['value']) * 57.2957795131
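# Worked sketch of the conversions above (raw value assumed): a non-double
# 'fixed' stores a Q28 integer, so m_nVal == 1 << 27 decodes to 0.5, and
# angle_value() additionally converts the stored radians to degrees
# (0.5 rad * 57.2957795131 is roughly 28.65 degrees). The helper below only
# restates that arithmetic and is not used by the pretty printers.
def _decode_q28(raw):
    """Decode a raw Q28 fixed-point integer into a float."""
    return raw / (1. * (1 << 28))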
class FixedPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return str(fixed_value(self.value))
class AnglePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return str(angle_value(self.value))
class AngleRangePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
start = AnglePrinter(self.value['start']).to_string()
end = AnglePrinter(self.value['end']).to_string()
return 'AngleRange(%s..%s)' % (start, end)
class GeoPointPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
if angle_value(self.value['latitude']) >= 180:
return 'GeoPoint::INVALID'
longitude = AnglePrinter(self.value['longitude']).to_string()
latitude = AnglePrinter(self.value['latitude']).to_string()
return 'GeoPoint(%s %s)' % (longitude, latitude)
class GeoBoundsPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
if angle_value(self.value['latitude']['end']) >= 180:
return 'GeoBounds::INVALID'
west = AnglePrinter(self.value['longitude']['start']).to_string()
east = AnglePrinter(self.value['longitude']['end']).to_string()
south = AnglePrinter(self.value['latitude']['start']).to_string()
north = AnglePrinter(self.value['latitude']['end']).to_string()
return 'GeoBounds([%s .. %s] [%s .. %s])' % (west, east, south, north)
class GeoVectorPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
bearing = AnglePrinter(self.value['bearing']).to_string()
distance = fixed_value(self.value['distance'])
if distance < 0:
return 'GeoVector::INVALID'
return 'GeoVector(%s %s)' % (bearing, distance)
class SpeedVectorPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
bearing = AnglePrinter(self.value['bearing']).to_string()
norm = fixed_value(self.value['norm'])
        if norm < 0:
            return 'SpeedVector::INVALID'
        if norm == 0:
            return 'SpeedVector::ZERO'
return 'SpeedVector(%s %s)' % (bearing, norm)
class ValidityPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return 'Validity(%u)' % self.value['last']
class StaticStringPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return self.value['data'].cast(string_type)
class BrokenDatePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return 'Date(%04u/%02u/%02u)' % \
(int(self.value['year']),
int(self.value['month']),
int(self.value['day']))
class BrokenTimePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return 'Time(%02u:%02u:%02u)' % \
(int(self.value['hour']),
int(self.value['minute']),
int(self.value['second']))
class BrokenDateTimePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
return 'DateTime(%04u/%02u/%02u %02u:%02u:%02u)' % \
(int(self.value['year']),
int(self.value['month']),
int(self.value['day']),
int(self.value['hour']),
int(self.value['minute']),
int(self.value['second']))
class RoughTimePrinter:
def __init__(self, value):
self.value = value
def to_string(self):
value = int(self.value['value'])
if value == 0xffff:
return 'RoughTime::INVALID'
return 'RoughTime(%02u:%02u)' % (value / 60, value % 60)
class RoughTimeSpanPrinter:
def __init__(self, value):
self.value = value
def to_string(self):
start = int(self.value['start']['value'])
end = int(self.value['start']['value'])
if start == 0xffff and end == 0xffff:
return 'RoughTimeSpan::INVALID'
if start == 0xffff:
start = ''
else:
start = '%02u:%02u' % (start / 60, start % 60)
if end == 0xffff:
end = ''
else:
end = '%02u:%02u' % (end / 60, end % 60)
return 'RoughTimeSpan(%s..%s)' % (start, end)
def lookup_function(value):
type = value.type
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
type = type.unqualified().strip_typedefs()
typename = type.tag
if typename == None:
return None
if typename == 'fixed':
return FixedPrinter(value)
elif typename == 'Angle':
return AnglePrinter(value)
elif typename == 'AngleRange':
return AngleRangePrinter(value)
elif typename == 'GeoPoint':
return GeoPointPrinter(value)
elif typename == 'GeoBounds':
return GeoBoundsPrinter(value)
elif typename == 'GeoVector':
return GeoVectorPrinter(value)
elif typename == 'SpeedVector':
return SpeedVectorPrinter(value)
elif typename == 'Validity':
return ValidityPrinter(value)
elif typename == 'BrokenDate':
return BrokenDatePrinter(value)
elif typename == 'BrokenTime':
return BrokenTimePrinter(value)
elif typename == 'BrokenDateTime':
return BrokenDateTimePrinter(value)
elif typename == 'RoughTime':
return RoughTimePrinter(value)
elif typename == 'RoughTimeSpan':
return RoughTimeSpanPrinter(value)
elif typename[:12] == 'StaticString' or typename[:12] == 'NarrowString':
return StaticStringPrinter(value)
return None
gdb.pretty_printers.append(lookup_function)
| gpl-2.0 | -1,475,593,326,829,678,800 | 29.198347 | 78 | 0.601669 | false |
natetrue/ReplicatorG | skein_engines/skeinforge-31/fabmetheus_utilities/geometry/geometry_tools/path.py | 2 | 6168 | """
Path object for storing a list of vertexes.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to work around the Python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_tools import dictionary
from fabmetheus_utilities.geometry.geometry_tools import vertex
from fabmetheus_utilities.geometry.manipulation_evaluator import matrix
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import svg_writer
from fabmetheus_utilities import xml_simple_reader
from fabmetheus_utilities import xml_simple_writer
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = "$Date: 2008/02/05 $"
__license__ = 'GPL 3.0'
def convertProcessXMLElementRenameByPaths(geometryOutput, xmlElement):
"Convert the xml element to a path xml element, add paths and process."
convertXMLElementRenameByPaths(geometryOutput, xmlElement)
processXMLElement(xmlElement)
def convertXMLElement(geometryOutput, xmlElement):
"Convert the xml element to a path xml element."
vertex.addGeometryList(geometryOutput, xmlElement)
def convertXMLElementRename(geometryOutput, xmlElement):
"Convert the xml element to a path xml element."
xmlElement.className = 'path'
convertXMLElement(geometryOutput, xmlElement)
def convertXMLElementRenameByPaths(geometryOutput, xmlElement):
"Convert the xml element to a path xml element and add paths."
xmlElement.className = 'path'
for geometryOutputChild in geometryOutput:
pathElement = xml_simple_reader.XMLElement()
pathElement.setParentAddToChildren(xmlElement)
convertXMLElementRename( geometryOutputChild, pathElement)
def processXMLElement(xmlElement):
"Process the xml element."
evaluate.processArchivable(Path, xmlElement)
class Path(dictionary.Dictionary):
"A path."
def __init__(self):
"Add empty lists."
dictionary.Dictionary.__init__(self)
self.matrix4X4 = matrix.Matrix()
self.oldChainTetragrid = None
self.transformedPath = None
self.vertexes = []
def addXMLInnerSection(self, depth, output):
"Add the xml section for this object."
if self.matrix4X4 != None:
self.matrix4X4.addXML(depth, output)
xml_simple_writer.addXMLFromVertexes(depth, output, self.vertexes)
def getFabricationExtension(self):
"Get fabrication extension."
return 'svg'
def getFabricationText(self):
"Get fabrication text."
carving = SVGFabricationCarving(self.xmlElement)
carving.setCarveLayerThickness(evaluate.getSheetThickness(self.xmlElement))
carving.processSVGElement(self.xmlElement.getRoot().parser.fileName)
return str(carving)
def getMatrixChainTetragrid(self):
"Get the matrix chain tetragrid."
return self.matrix4X4.getOtherTimesSelf(self.xmlElement.parent.object.getMatrixChainTetragrid()).matrixTetragrid
def getPaths(self):
"Get all paths."
self.transformedPath = None
return dictionary.getAllPaths([self.vertexes], self)
def getTransformedPaths(self):
"Get all transformed paths."
if self.xmlElement == None:
return dictionary.getAllPaths([self.vertexes], self)
chainTetragrid = self.getMatrixChainTetragrid()
if self.oldChainTetragrid != chainTetragrid:
self.oldChainTetragrid = chainTetragrid
self.transformedPath = None
if self.transformedPath == None:
self.transformedPath = matrix.getTransformedVector3s(chainTetragrid, self.vertexes)
return dictionary.getAllTransformedPaths([self.transformedPath], self)
class SVGFabricationCarving:
"An slc carving."
def __init__(self, xmlElement):
"Add empty lists."
self.layerThickness = 1.0
self.rotatedLoopLayers = []
self.xmlElement = xmlElement
def __repr__(self):
"Get the string representation of this carving."
return self.getCarvedSVG()
def addXML(self, depth, output):
"Add xml for this object."
xml_simple_writer.addXMLFromObjects(depth, self.rotatedLoopLayers, output)
def getCarveCornerMaximum(self):
"Get the corner maximum of the vertexes."
return self.cornerMaximum
def getCarveCornerMinimum(self):
"Get the corner minimum of the vertexes."
return self.cornerMinimum
def getCarvedSVG(self):
"Get the carved svg text."
return svg_writer.getSVGByLoopLayers(False, self.rotatedLoopLayers, self)
def getCarveLayerThickness(self):
"Get the layer thickness."
return self.layerThickness
def getCarveRotatedBoundaryLayers(self):
"Get the rotated boundary layers."
return self.rotatedLoopLayers
def getFabmetheusXML(self):
"Return the fabmetheus XML."
return self.xmlElement.getParser().getOriginalRoot()
def getInterpretationSuffix(self):
"Return the suffix for a carving."
return 'svg'
def processSVGElement(self, fileName):
"Parse SVG element and store the layers."
self.fileName = fileName
paths = self.xmlElement.object.getPaths()
if len(paths) < 1:
return
firstPath = paths[0]
if len(firstPath) < 1:
return
rotatedLoopLayer = euclidean.RotatedLoopLayer(firstPath[0].z)
self.rotatedLoopLayers.append(rotatedLoopLayer)
for path in paths:
rotatedLoopLayer.loops.append(euclidean.getComplexPath(path))
self.cornerMaximum = Vector3(-999999999.0, -999999999.0, -999999999.0)
self.cornerMinimum = Vector3(999999999.0, 999999999.0, 999999999.0)
svg_writer.setSVGCarvingCorners(self.rotatedLoopLayers, self)
halfLayerThickness = 0.5 * self.layerThickness
self.cornerMaximum.z += halfLayerThickness
self.cornerMinimum.z -= halfLayerThickness
def setCarveBridgeLayerThickness( self, bridgeLayerThickness ):
"Set the bridge layer thickness. If the infill is not in the direction of the bridge, the bridge layer thickness should be given as None or not set at all."
pass
def setCarveLayerThickness( self, layerThickness ):
"Set the layer thickness."
self.layerThickness = layerThickness
def setCarveImportRadius( self, importRadius ):
"Set the import radius."
pass
def setCarveIsCorrectMesh( self, isCorrectMesh ):
"Set the is correct mesh flag."
pass
| gpl-2.0 | -2,472,274,397,435,233,000 | 33.077348 | 159 | 0.778534 | false |
xzovy/cryptographer.py | cryptochatter-server.py | 1 | 2340 | # chat_server.py
import sys, socket, select
HOST = ''
SOCKET_LIST = []
RECV_BUFFER = 4096
PORT = 443
def chat_server():
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen(10)
# add server socket object to the list of readable connections
SOCKET_LIST.append(server_socket)
print("Chat server started on port " + str(PORT))
while 1:
        # get the list of sockets which are ready to be read through select
# 4th arg, time_out = 0 : poll and never block
ready_to_read,ready_to_write,in_error = select.select(SOCKET_LIST,[],[],0)
for sock in ready_to_read:
            # a new connection request received
if sock == server_socket:
sockfd, addr = server_socket.accept()
SOCKET_LIST.append(sockfd)
# a message from a client, not a new connection
else:
# process data recieved from client,
try:
# receiving data from the socket.
data = sock.recv(RECV_BUFFER)
if data:
# there is something in the socket
msg = data.decode('utf-8')
print(msg)
broadcast(server_socket, sock, data)
else:
# remove the socket that's broken
if sock in SOCKET_LIST:
SOCKET_LIST.remove(sock)
# exception
except:
continue
server_socket.close()
# broadcast chat messages to all connected clients
def broadcast (server_socket, sock, message):
for socket in SOCKET_LIST:
        # send the message to every peer except the server socket and the sender
if socket != server_socket and socket != sock :
try :
socket.send(message)
except :
# broken socket connection
socket.close()
# broken socket, remove it
if socket in SOCKET_LIST:
SOCKET_LIST.remove(socket)
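# Minimal client sketch for manual testing; the host and port are assumed to
# match the server above, and this function is not invoked by the script.
def example_client(host='127.0.0.1', port=PORT):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.settimeout(5)
    client.connect((host, port))
    client.send('hello from example client'.encode('utf-8'))
    # Messages broadcast by other peers (never our own) would arrive here.
    print(client.recv(RECV_BUFFER).decode('utf-8'))
    client.close()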
if __name__ == "__main__":
sys.exit(chat_server())
| gpl-2.0 | 8,741,897,854,056,861,000 | 30.2 | 82 | 0.522222 | false |
lrr-tum/mmbwmon | vendor/fast-lib/vendor/mosquitto-1.4.12/test/lib/03-publish-c2b-qos2-disconnect.py | 10 | 3012 | #!/usr/bin/env python
# Test whether a client sends a correct PUBLISH to a topic with QoS 2 and responds to a disconnect.
import inspect
import os
import socket
import sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
keepalive = 60
connect_packet = mosq_test.gen_connect("publish-qos2-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
disconnect_packet = mosq_test.gen_disconnect()
mid = 1
publish_packet = mosq_test.gen_publish("pub/qos2/test", qos=2, mid=mid, payload="message")
publish_dup_packet = mosq_test.gen_publish("pub/qos2/test", qos=2, mid=mid, payload="message", dup=True)
pubrec_packet = mosq_test.gen_pubrec(mid)
pubrel_packet = mosq_test.gen_pubrel(mid)
pubcomp_packet = mosq_test.gen_pubcomp(mid)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
env['LD_LIBRARY_PATH'] = '../../lib:../../lib/cpp'
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../lib/python:'+pp
client = mosq_test.start_client(filename=sys.argv[1].replace('/', '-'), cmd=client_args, env=env)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "publish", publish_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "retried publish", publish_dup_packet):
conn.send(pubrec_packet)
if mosq_test.expect_packet(conn, "pubrel", pubrel_packet):
# Disconnect client. It should reconnect.
conn.close()
(conn, address) = sock.accept()
conn.settimeout(15)
# Complete connection and message flow.
if mosq_test.expect_packet(conn, "connect", connect_packet):
conn.send(connack_packet)
if mosq_test.expect_packet(conn, "retried pubrel", pubrel_packet):
conn.send(pubcomp_packet)
if mosq_test.expect_packet(conn, "disconnect", disconnect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
| gpl-3.0 | 3,230,275,059,592,758,300 | 32.466667 | 129 | 0.614542 | false |
j340m3/python-msquaredc | docs/conf.py | 1 | 1313 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'MsquaredC'
year = '2016-2018'
author = 'Jerome Bergmann'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.1.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/j340m3/python-msquaredc/issues/%s', '#'),
'pr': ('https://github.com/j340m3/python-msquaredc/pull/%s', 'PR #'),
}
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {}
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
| bsd-2-clause | -6,366,636,002,596,574,000 | 24.25 | 75 | 0.654989 | false |
realsaiko/odoo | addons/base_import_module/controllers/main.py | 354 | 1518 | # -*- coding: utf-8 -*-
import functools
import openerp
from openerp.http import Controller, route, request, Response
def webservice(f):
@functools.wraps(f)
def wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception, e:
return Response(response=str(e), status=500)
return wrap
class ImportModule(Controller):
def check_user(self, uid=None):
if uid is None:
uid = request.uid
is_admin = request.registry['res.users'].has_group(request.cr, uid, 'base.group_erp_manager')
if not is_admin:
raise openerp.exceptions.AccessError("Only administrators can upload a module")
@route('/base_import_module/login', type='http', auth='none', methods=['POST'])
@webservice
def login(self, login, password, db=None):
if db and db != request.db:
raise Exception("Could not select database '%s'" % db)
uid = request.session.authenticate(request.db, login, password)
if not uid:
return Response(response="Wrong login/password", status=401)
self.check_user(uid)
return "ok"
@route('/base_import_module/upload', type='http', auth='user', methods=['POST'])
@webservice
def upload(self, mod_file=None, force='', **kw):
self.check_user()
force = True if force == '1' else False
return request.registry['ir.module.module'].import_zipfile(request.cr, request.uid, mod_file, force=force, context=request.context)[0]
| agpl-3.0 | -302,733,171,776,546,200 | 36.95 | 142 | 0.6278 | false |
t0mk/ansible | lib/ansible/modules/network/lenovo/cnos_backup.py | 3 | 11204 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Backup Config to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: cnos_backup
author: "Dave Kasberg (@dkasberg)"
short_description: Back up the current running or startup configuration to a remote server on devices running Lenovo CNOS
description:
- This module allows you to work with switch configurations. It provides a
way to back up the running or startup configurations of a switch to a
remote server. This is achieved by periodically saving a copy of the
startup or running configuration of the network device to a remote server
using FTP, SFTP, TFTP, or SCP. The first step is to create a directory from
where the remote server can be reached. The next step is to provide the
full file path of the location where the configuration will be backed up.
Authentication details required by the remote server must be provided as
well. This module uses SSH to manage network device configuration.
      The results of the operation will be placed in a directory named 'results'
      that must be created by the user in the local directory from which the playbook is run.
      For more information about this module from Lenovo and for customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_backup.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
configType:
description:
- This specifies what type of configuration will be backed up. The
choices are the running or startup configurations. There is no
default value, so it will result in an error if the input is
incorrect.
required: Yes
default: Null
choices: [running-config, startup-config]
protocol:
description:
            - This refers to the protocol used by the network device to
              interact with the remote server to which the backup configuration
              is uploaded. The choices are FTP, SFTP, TFTP, or SCP. Any other
protocols will result in error. If this parameter is not specified,
there is no default value to be used.
required: Yes
default: Null
choices: [SFTP, SCP, FTP, TFTP]
    serverip:
        description:
            - This specifies the IP address of the remote server to which the
              configuration will be backed up.
required: Yes
default: Null
rcpath:
description:
- This specifies the full file path where the configuration file
will be copied on the remote server. In case the relative path is
used as the variable value, the root folder for the user of the
server needs to be specified.
required: Yes
default: Null
serverusername:
description:
- Specify the username for the server relating to the protocol
used.
required: Yes
default: Null
serverpassword:
description:
- Specify the password for the server relating to the protocol
used.
required: Yes
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_backup. These are written in the main.yml file of the tasks directory.
---
- name: Test Running Config Backup
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Startup Config Backup
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Running Config Backup -TFTP
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Startup Config Backup - TFTP
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
'''
RETURN = '''
---
return value: |
On successful execution, the method returns a message in JSON format
    [Config file transferred to server]
Upon any failure, the method returns an error display string.
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
configType=dict(required=True),
protocol=dict(required=True),
serverip=dict(required=True),
rcpath=dict(required=True),
serverusername=dict(required=False),
serverpassword=dict(required=False, no_log=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
outputfile = module.params['outputfile']
host = module.params['host']
deviceType = module.params['deviceType']
configType = module.params['configType']
protocol = module.params['protocol'].lower()
rcserverip = module.params['serverip']
rcpath = module.params['rcpath']
serveruser = module.params['serverusername']
serverpwd = module.params['serverpassword']
output = ""
timeout = 90
tftptimeout = 450
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in
# your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(host, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
#
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + \
cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + \
cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Invoke method for config transfer to the server
if(configType == 'running-config'):
if(protocol == "tftp" or protocol == "ftp"):
transfer_status = cnos.doRunningConfigBackUp(
protocol, tftptimeout, rcserverip, rcpath, serveruser,
serverpwd, remote_conn)
elif(protocol == "sftp" or protocol == "scp"):
transfer_status = cnos.doSecureRunningConfigBackUp(
protocol, timeout, rcserverip, rcpath, serveruser,
serverpwd, remote_conn)
else:
transfer_status = "Invalid Protocol option"
elif(configType == 'startup-config'):
if(protocol == "tftp" or protocol == "ftp"):
transfer_status = cnos.doStartupConfigBackUp(
protocol, tftptimeout, rcserverip, rcpath, serveruser,
serverpwd, remote_conn)
elif(protocol == "sftp" or protocol == "scp"):
transfer_status = cnos.doSecureStartupConfigBackUp(
protocol, timeout, rcserverip, rcpath, serveruser, serverpwd,
remote_conn)
else:
transfer_status = "Invalid Protocol option"
else:
transfer_status = "Invalid configType Option"
output = output + "\n Config Back Up status \n" + transfer_status
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Config file tranferred to server")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 | -9,018,001,414,475,193,000 | 37.768166 | 138 | 0.661549 | false |
iemejia/beam | sdks/python/apache_beam/examples/complete/juliaset/juliaset/juliaset.py | 5 | 4390 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Julia set computing workflow: https://en.wikipedia.org/wiki/Julia_set.
We use the quadratic polynomial f(z) = z*z + c, with c = -.62772 + .42193i
"""
# pytype: skip-file
import argparse
import apache_beam as beam
from apache_beam.io import WriteToText
def from_pixel(x, y, n):
"""Converts a NxN pixel position to a (-1..1, -1..1) complex number."""
return complex(2.0 * x / n - 1.0, 2.0 * y / n - 1.0)
def get_julia_set_point_color(element, c, n, max_iterations):
"""Given an pixel, convert it into a point in our julia set."""
x, y = element
z = from_pixel(x, y, n)
for i in range(max_iterations):
if z.real * z.real + z.imag * z.imag > 2.0:
break
z = z * z + c
return x, y, i # pylint: disable=undefined-loop-variable
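# A hedged single-point sketch (grid size and iteration count assumed, not
# used by the pipeline): the third element of the returned tuple counts how
# many iterations the point survived before |z|^2 exceeded 2.0, which is what
# later drives the coloring.
def _single_point_example():
  c = complex(-.62772, .42193)
  return get_julia_set_point_color((50, 50), c, 100, 100)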
def generate_julia_set_colors(pipeline, c, n, max_iterations):
"""Compute julia set coordinates for each point in our set."""
def point_set(n):
for x in range(n):
for y in range(n):
yield (x, y)
julia_set_colors = (
pipeline
| 'add points' >> beam.Create(point_set(n))
| beam.Map(get_julia_set_point_color, c, n, max_iterations))
return julia_set_colors
def generate_julia_set_visualization(data, n, max_iterations):
"""Generate the pixel matrix for rendering the julia set as an image."""
import numpy as np # pylint: disable=wrong-import-order, wrong-import-position
colors = []
for r in range(0, 256, 16):
for g in range(0, 256, 16):
for b in range(0, 256, 16):
colors.append((r, g, b))
xy = np.zeros((n, n, 3), dtype=np.uint8)
for x, y, iteration in data:
xy[x, y] = colors[iteration * len(colors) // max_iterations]
return xy
def save_julia_set_visualization(out_file, image_array):
"""Save the fractal image of our julia set as a png."""
from matplotlib import pyplot as plt # pylint: disable=wrong-import-order, wrong-import-position
plt.imsave(out_file, image_array, format='png')
def run(argv=None): # pylint: disable=missing-docstring
parser = argparse.ArgumentParser()
parser.add_argument(
'--grid_size',
dest='grid_size',
default=1000,
help='Size of the NxN matrix')
parser.add_argument(
'--coordinate_output',
dest='coordinate_output',
required=True,
help='Output file to write the color coordinates of the image to.')
parser.add_argument(
'--image_output',
dest='image_output',
default=None,
help='Output file to write the resulting image to.')
known_args, pipeline_args = parser.parse_known_args(argv)
with beam.Pipeline(argv=pipeline_args) as p:
n = int(known_args.grid_size)
coordinates = generate_julia_set_colors(p, complex(-.62772, .42193), n, 100)
def x_coord_key(x_y_i):
(x, y, i) = x_y_i
return (x, (x, y, i))
# Group each coordinate triplet by its x value, then write the coordinates
# to the output file with an x-coordinate grouping per line.
# pylint: disable=expression-not-assigned
(
coordinates
| 'x coord key' >> beam.Map(x_coord_key)
| 'x coord' >> beam.GroupByKey()
| 'format' >> beam.Map(
lambda k_coords: ' '.join('(%s, %s, %s)' % c for c in k_coords[1]))
| WriteToText(known_args.coordinate_output))
# Optionally render the image and save it to a file.
# TODO(silviuc): Add this functionality.
# if p.options.image_output is not None:
# julia_set_image = generate_julia_set_visualization(
# file_with_coordinates, n, 100)
# save_julia_set_visualization(p.options.image_output, julia_set_image)
| apache-2.0 | -6,608,805,568,544,762,000 | 33.031008 | 99 | 0.663781 | false |
jinlmsft/kubernetes | hack/verify-flags-underscore.py | 169 | 9059 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <[email protected]>
@author: Jorge Orpinel <[email protected]>"""
try:
with open(pathname, 'r') as f:
CHUNKSIZE = 1024
while 1:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
except:
return True
return False
def get_all_files(rootdir):
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'vendor' in dirs:
dirs.remove('vendor')
if 'staging' in dirs:
dirs.remove('staging')
if '_output' in dirs:
dirs.remove('_output')
if '_gopath' in dirs:
dirs.remove('_gopath')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if '.make' in dirs:
dirs.remove('.make')
if 'BUILD' in files:
files.remove('BUILD')
if 'exceptions.txt' in files:
files.remove('exceptions.txt')
if 'known-flags.txt' in files:
files.remove('known-flags.txt')
for name in files:
pathname = os.path.join(root, name)
if is_binary(pathname):
continue
all_files.append(pathname)
return all_files
def normalize_files(rootdir, files):
newfiles = []
a = ['Godeps', '_gopath', 'third_party', '.git', 'exceptions.txt', 'known-flags.txt']
for f in files:
if any(x in f for x in a):
continue
if f.endswith(".svg"):
continue
if f.endswith(".gliffy"):
continue
if f.endswith(".md"):
continue
if f.endswith(".yaml"):
continue
newfiles.append(f)
for i, f in enumerate(newfiles):
if not os.path.isabs(f):
newfiles[i] = os.path.join(rootdir, f)
return newfiles
def line_has_bad_flag(line, flagre):
results = flagre.findall(line)
for result in results:
if not "_" in result:
return False
# this should exclude many cases where jinja2 templates use kube flags
# as variables, except it uses _ for the variable name
if "{% set" + result + "= \"" in line:
return False
if "pillar[" + result + "]" in line:
return False
if "grains" + result in line:
return False
# something common in juju variables...
if "template_data[" + result + "]" in line:
return False
return True
return False
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
# preload the 'known' flags
pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
f = open(pathname, 'r')
flags = set(f.read().splitlines())
f.close()
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
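    # These patterns are meant to catch Go flag registrations such as (illustrative)
    # fs.String("flag-name", "", "...") or StringVar(&v, "flag-name", ...), capturing the
    # quoted flag name in the first group.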
new_flags = set()
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if not "-" in flag:
continue
if flag not in flags:
new_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
l = list(new_excluded_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
if len(new_flags) != 0:
print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
l = list(new_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
return list(flags)
def flags_to_re(flags):
"""turn the list of all flags we found into a regex find both - and _ versions"""
dashRE = re.compile('[-_]')
flagREs = []
for flag in flags:
# turn all flag names into regexs which will find both types
newre = dashRE.sub('[-_]', flag)
# only match if there is not a leading or trailing alphanumeric character
flagREs.append("[^\w${]" + newre + "[^\w]")
# turn that list of regex strings into a single large RE
flagRE = "|".join(flagREs)
flagRE = re.compile(flagRE)
return flagRE
def load_exceptions(rootdir):
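    # Each exceptions.txt entry is expected to be "<relative file path>:<full offending line>";
    # malformed entries are reported and skipped below.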
exceptions = set()
if args.skip_exceptions:
return exceptions
exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
exception_file = open(exception_filename, 'r')
for exception in exception_file.read().splitlines():
out = exception.split(":", 1)
if len(out) != 2:
print("Invalid line in exceptions file: %s" % exception)
continue
filename = out[0]
line = out[1]
exceptions.add((filename, line))
return exceptions
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
exceptions = load_exceptions(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
files = normalize_files(rootdir, files)
flags = get_flags(rootdir, files)
flagRE = flags_to_re(flags)
bad_lines = []
    # walk all the files looking for any flag that was declared and now has an _
for pathname in files:
relname = os.path.relpath(pathname, rootdir)
f = open(pathname, 'r')
for line in f.read().splitlines():
if line_has_bad_flag(line, flagRE):
if (relname, line) not in exceptions:
bad_lines.append((relname, line))
f.close()
if len(bad_lines) != 0:
if not args.skip_exceptions:
print("Found illegal 'flag' usage. If these are false negatives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
bad_lines.sort()
for (relname, line) in bad_lines:
print("%s:%s" % (relname, line))
return 1
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | 6,769,780,855,738,991,000 | 35.381526 | 184 | 0.58693 | false |
jazkarta/edx-platform-for-isc | common/lib/xmodule/xmodule/modulestore/tests/test_contentstore.py | 87 | 8284 | """
Test contentstore.mongo functionality
"""
import logging
from uuid import uuid4
import unittest
import mimetypes
from tempfile import mkdtemp
import path
import shutil
from opaque_keys.edx.locator import CourseLocator, AssetLocator
from opaque_keys.edx.keys import AssetKey
from xmodule.tests import DATA_DIR
from xmodule.contentstore.mongo import MongoContentStore
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
import ddt
from __builtin__ import delattr
from xmodule.modulestore.tests.mongo_connection import MONGO_PORT_NUM, MONGO_HOST
log = logging.getLogger(__name__)
HOST = MONGO_HOST
PORT = MONGO_PORT_NUM
DB = 'test_mongo_%s' % uuid4().hex[:5]
@ddt.ddt
class TestContentstore(unittest.TestCase):
"""
Test the methods in contentstore.mongo using deprecated and non-deprecated keys
"""
# don't use these 2 class vars as they restore behavior once the tests are done
asset_deprecated = None
ssck_deprecated = None
@classmethod
def tearDownClass(cls):
"""
Restores deprecated values
"""
if cls.asset_deprecated is not None:
setattr(AssetLocator, 'deprecated', cls.asset_deprecated)
else:
delattr(AssetLocator, 'deprecated')
if cls.ssck_deprecated is not None:
setattr(CourseLocator, 'deprecated', cls.ssck_deprecated)
else:
delattr(CourseLocator, 'deprecated')
return super(TestContentstore, cls).tearDownClass()
def set_up_assets(self, deprecated):
"""
Setup contentstore w/ proper overriding of deprecated.
"""
# since MongoModuleStore and MongoContentStore are basically assumed to be together, create this class
# as well
self.contentstore = MongoContentStore(HOST, DB, port=PORT)
self.addCleanup(self.contentstore._drop_database) # pylint: disable=protected-access
setattr(AssetLocator, 'deprecated', deprecated)
setattr(CourseLocator, 'deprecated', deprecated)
self.course1_key = CourseLocator('test', 'asset_test', '2014_07')
self.course2_key = CourseLocator('test', 'asset_test2', '2014_07')
self.course1_files = ['contains.sh', 'picture1.jpg', 'picture2.jpg']
self.course2_files = ['picture1.jpg', 'picture3.jpg', 'door_2.ogg']
def load_assets(course_key, files):
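            # alternate the locked flag so both locked and unlocked assets are exercised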
locked = False
for filename in files:
asset_key = course_key.make_asset_key('asset', filename)
self.save_asset(filename, asset_key, filename, locked)
locked = not locked
load_assets(self.course1_key, self.course1_files)
load_assets(self.course2_key, self.course2_files)
def save_asset(self, filename, asset_key, displayname, locked):
"""
Load and save the given file.
"""
with open("{}/static/{}".format(DATA_DIR, filename), "rb") as f:
content = StaticContent(
asset_key, displayname, mimetypes.guess_type(filename)[0], f.read(),
locked=locked
)
self.contentstore.save(content)
@ddt.data(True, False)
def test_delete(self, deprecated):
"""
Test that deleting assets works
"""
self.set_up_assets(deprecated)
asset_key = self.course1_key.make_asset_key('asset', self.course1_files[0])
self.contentstore.delete(asset_key)
with self.assertRaises(NotFoundError):
self.contentstore.find(asset_key)
# ensure deleting a non-existent file is a noop
self.contentstore.delete(asset_key)
@ddt.data(True, False)
def test_find(self, deprecated):
"""
Test using find
"""
self.set_up_assets(deprecated)
asset_key = self.course1_key.make_asset_key('asset', self.course1_files[0])
self.assertIsNotNone(self.contentstore.find(asset_key), "Could not find {}".format(asset_key))
self.assertIsNotNone(self.contentstore.find(asset_key, as_stream=True), "Could not find {}".format(asset_key))
unknown_asset = self.course1_key.make_asset_key('asset', 'no_such_file.gif')
with self.assertRaises(NotFoundError):
self.contentstore.find(unknown_asset)
self.assertIsNone(
self.contentstore.find(unknown_asset, throw_on_not_found=False),
"Found unknown asset {}".format(unknown_asset)
)
@ddt.data(True, False)
def test_export_for_course(self, deprecated):
"""
Test export
"""
self.set_up_assets(deprecated)
root_dir = path.path(mkdtemp())
try:
self.contentstore.export_all_for_course(
self.course1_key, root_dir,
path.path(root_dir / "policy.json"),
)
for filename in self.course1_files:
filepath = path.path(root_dir / filename)
self.assertTrue(filepath.isfile(), "{} is not a file".format(filepath))
for filename in self.course2_files:
if filename not in self.course1_files:
filepath = path.path(root_dir / filename)
self.assertFalse(filepath.isfile(), "{} is unexpected exported a file".format(filepath))
finally:
shutil.rmtree(root_dir)
@ddt.data(True, False)
def test_get_all_content(self, deprecated):
"""
Test get_all_content_for_course
"""
self.set_up_assets(deprecated)
course1_assets, count = self.contentstore.get_all_content_for_course(self.course1_key)
self.assertEqual(count, len(self.course1_files), course1_assets)
for asset in course1_assets:
parsed = AssetKey.from_string(asset['filename'])
self.assertIn(parsed.name, self.course1_files)
course1_assets, __ = self.contentstore.get_all_content_for_course(self.course1_key, 1, 1)
self.assertEqual(len(course1_assets), 1, course1_assets)
fake_course = CourseLocator('test', 'fake', 'non')
course_assets, count = self.contentstore.get_all_content_for_course(fake_course)
self.assertEqual(count, 0)
self.assertEqual(course_assets, [])
@ddt.data(True, False)
def test_attrs(self, deprecated):
"""
Test setting and getting attrs
"""
self.set_up_assets(deprecated)
for filename in self.course1_files:
asset_key = self.course1_key.make_asset_key('asset', filename)
prelocked = self.contentstore.get_attr(asset_key, 'locked', False)
self.contentstore.set_attr(asset_key, 'locked', not prelocked)
self.assertEqual(self.contentstore.get_attr(asset_key, 'locked', False), not prelocked)
@ddt.data(True, False)
def test_copy_assets(self, deprecated):
"""
copy_all_course_assets
"""
self.set_up_assets(deprecated)
dest_course = CourseLocator('test', 'destination', 'copy')
self.contentstore.copy_all_course_assets(self.course1_key, dest_course)
for filename in self.course1_files:
asset_key = self.course1_key.make_asset_key('asset', filename)
dest_key = dest_course.make_asset_key('asset', filename)
source = self.contentstore.find(asset_key)
copied = self.contentstore.find(dest_key)
for propname in ['name', 'content_type', 'length', 'locked']:
self.assertEqual(getattr(source, propname), getattr(copied, propname))
__, count = self.contentstore.get_all_content_for_course(dest_course)
self.assertEqual(count, len(self.course1_files))
@ddt.data(True, False)
def test_delete_assets(self, deprecated):
"""
delete_all_course_assets
"""
self.set_up_assets(deprecated)
self.contentstore.delete_all_course_assets(self.course1_key)
__, count = self.contentstore.get_all_content_for_course(self.course1_key)
self.assertEqual(count, 0)
# ensure it didn't remove any from other course
__, count = self.contentstore.get_all_content_for_course(self.course2_key)
self.assertEqual(count, len(self.course2_files))
| agpl-3.0 | 2,429,265,326,443,457,500 | 38.260664 | 118 | 0.635563 | false |
markhamstra/spark | python/pyspark/sql/column.py | 28 | 25024 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import json
if sys.version >= '3':
basestring = str
long = int
from pyspark import copy_func, since
from pyspark.context import SparkContext
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.types import *
__all__ = ["Column"]
def _create_column_from_literal(literal):
sc = SparkContext._active_spark_context
return sc._jvm.functions.lit(literal)
def _create_column_from_name(name):
sc = SparkContext._active_spark_context
return sc._jvm.functions.col(name)
def _to_java_column(col):
if isinstance(col, Column):
jcol = col._jc
elif isinstance(col, basestring):
jcol = _create_column_from_name(col)
else:
raise TypeError(
"Invalid argument, not a string or column: "
"{0} of type {1}. "
"For column literals, use 'lit', 'array', 'struct' or 'create_map' "
"function.".format(col, type(col)))
return jcol
def _to_seq(sc, cols, converter=None):
"""
Convert a list of Column (or names) into a JVM Seq of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toSeq(cols)
def _to_list(sc, cols, converter=None):
"""
Convert a list of Column (or names) into a JVM (Scala) List of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toList(cols)
def _unary_op(name, doc="unary operator"):
""" Create a method for given unary operator """
def _(self):
jc = getattr(self._jc, name)()
return Column(jc)
_.__doc__ = doc
return _
def _func_op(name, doc=''):
def _(self):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(self._jc)
return Column(jc)
_.__doc__ = doc
return _
def _bin_func_op(name, reverse=False, doc="binary function"):
def _(self, other):
sc = SparkContext._active_spark_context
fn = getattr(sc._jvm.functions, name)
jc = other._jc if isinstance(other, Column) else _create_column_from_literal(other)
njc = fn(self._jc, jc) if not reverse else fn(jc, self._jc)
return Column(njc)
_.__doc__ = doc
return _
def _bin_op(name, doc="binary operator"):
""" Create a method for given binary operator
"""
def _(self, other):
jc = other._jc if isinstance(other, Column) else other
njc = getattr(self._jc, name)(jc)
return Column(njc)
_.__doc__ = doc
return _
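# e.g. _bin_op("plus") builds the function bound to Column.__add__ below, which simply
# forwards to the JVM Column.plus(other) method.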
def _reverse_op(name, doc="binary operator"):
""" Create a method for binary operator (this object is on right side)
"""
def _(self, other):
jother = _create_column_from_literal(other)
jc = getattr(jother, name)(self._jc)
return Column(jc)
_.__doc__ = doc
return _
class Column(object):
"""
A column in a DataFrame.
:class:`Column` instances can be created by::
# 1. Select a column out of a DataFrame
df.colName
df["colName"]
# 2. Create from an expression
df.colName + 1
1 / df.colName
.. versionadded:: 1.3
"""
def __init__(self, jc):
self._jc = jc
# arithmetic operators
__neg__ = _func_op("negate")
__add__ = _bin_op("plus")
__sub__ = _bin_op("minus")
__mul__ = _bin_op("multiply")
__div__ = _bin_op("divide")
__truediv__ = _bin_op("divide")
__mod__ = _bin_op("mod")
__radd__ = _bin_op("plus")
__rsub__ = _reverse_op("minus")
__rmul__ = _bin_op("multiply")
__rdiv__ = _reverse_op("divide")
__rtruediv__ = _reverse_op("divide")
__rmod__ = _reverse_op("mod")
__pow__ = _bin_func_op("pow")
__rpow__ = _bin_func_op("pow", reverse=True)
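    # e.g. (df.age + 1) / 2 builds a new Column expression lazily; nothing is evaluated
    # until an action such as collect() runs (illustrative, assuming a DataFrame `df`).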
    # comparison operators
__eq__ = _bin_op("equalTo")
__ne__ = _bin_op("notEqual")
__lt__ = _bin_op("lt")
__le__ = _bin_op("leq")
__ge__ = _bin_op("geq")
__gt__ = _bin_op("gt")
_eqNullSafe_doc = """
Equality test that is safe for null values.
:param other: a value or :class:`Column`
>>> from pyspark.sql import Row
>>> df1 = spark.createDataFrame([
... Row(id=1, value='foo'),
... Row(id=2, value=None)
... ])
>>> df1.select(
... df1['value'] == 'foo',
... df1['value'].eqNullSafe('foo'),
... df1['value'].eqNullSafe(None)
... ).show()
+-------------+---------------+----------------+
|(value = foo)|(value <=> foo)|(value <=> NULL)|
+-------------+---------------+----------------+
| true| true| false|
| null| false| true|
+-------------+---------------+----------------+
>>> df2 = spark.createDataFrame([
... Row(value = 'bar'),
... Row(value = None)
... ])
>>> df1.join(df2, df1["value"] == df2["value"]).count()
0
>>> df1.join(df2, df1["value"].eqNullSafe(df2["value"])).count()
1
>>> df2 = spark.createDataFrame([
... Row(id=1, value=float('NaN')),
... Row(id=2, value=42.0),
... Row(id=3, value=None)
... ])
>>> df2.select(
... df2['value'].eqNullSafe(None),
... df2['value'].eqNullSafe(float('NaN')),
... df2['value'].eqNullSafe(42.0)
... ).show()
+----------------+---------------+----------------+
|(value <=> NULL)|(value <=> NaN)|(value <=> 42.0)|
+----------------+---------------+----------------+
| false| true| false|
| false| false| true|
| true| false| false|
+----------------+---------------+----------------+
.. note:: Unlike Pandas, PySpark doesn't consider NaN values to be NULL.
See the `NaN Semantics`_ for details.
.. _NaN Semantics:
https://spark.apache.org/docs/latest/sql-programming-guide.html#nan-semantics
.. versionadded:: 2.3.0
"""
eqNullSafe = _bin_op("eqNullSafe", _eqNullSafe_doc)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _bin_op('and')
__or__ = _bin_op('or')
__invert__ = _func_op('not')
__rand__ = _bin_op("and")
__ror__ = _bin_op("or")
# container operators
def __contains__(self, item):
raise ValueError("Cannot apply 'in' operator against a column: please use 'contains' "
"in a string column or 'array_contains' function for an array column.")
# bitwise operators
_bitwiseOR_doc = """
Compute bitwise OR of this expression with another expression.
:param other: a value or :class:`Column` to calculate bitwise or(|) against
this :class:`Column`.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseOR(df.b)).collect()
[Row((a | b)=235)]
"""
_bitwiseAND_doc = """
Compute bitwise AND of this expression with another expression.
:param other: a value or :class:`Column` to calculate bitwise and(&) against
this :class:`Column`.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseAND(df.b)).collect()
[Row((a & b)=10)]
"""
_bitwiseXOR_doc = """
Compute bitwise XOR of this expression with another expression.
:param other: a value or :class:`Column` to calculate bitwise xor(^) against
this :class:`Column`.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseXOR(df.b)).collect()
[Row((a ^ b)=225)]
"""
bitwiseOR = _bin_op("bitwiseOR", _bitwiseOR_doc)
bitwiseAND = _bin_op("bitwiseAND", _bitwiseAND_doc)
bitwiseXOR = _bin_op("bitwiseXOR", _bitwiseXOR_doc)
@since(1.3)
def getItem(self, key):
"""
An expression that gets an item at position ``ordinal`` out of a list,
or gets an item by key out of a dict.
>>> df = spark.createDataFrame([([1, 2], {"key": "value"})], ["l", "d"])
>>> df.select(df.l.getItem(0), df.d.getItem("key")).show()
+----+------+
|l[0]|d[key]|
+----+------+
| 1| value|
+----+------+
>>> df.select(df.l[0], df.d["key"]).show()
+----+------+
|l[0]|d[key]|
+----+------+
| 1| value|
+----+------+
"""
return self[key]
@since(1.3)
def getField(self, name):
"""
An expression that gets a field by name in a StructField.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(r=Row(a=1, b="b"))])
>>> df.select(df.r.getField("b")).show()
+---+
|r.b|
+---+
| b|
+---+
>>> df.select(df.r.a).show()
+---+
|r.a|
+---+
| 1|
+---+
"""
return self[name]
def __getattr__(self, item):
if item.startswith("__"):
raise AttributeError(item)
return self.getField(item)
def __getitem__(self, k):
if isinstance(k, slice):
if k.step is not None:
raise ValueError("slice with step is not supported.")
return self.substr(k.start, k.stop)
else:
return _bin_op("apply")(self, k)
def __iter__(self):
raise TypeError("Column is not iterable")
# string methods
_contains_doc = """
Contains the other element. Returns a boolean :class:`Column` based on a string match.
:param other: string in line
>>> df.filter(df.name.contains('o')).collect()
[Row(age=5, name=u'Bob')]
"""
_rlike_doc = """
SQL RLIKE expression (LIKE with Regex). Returns a boolean :class:`Column` based on a regex
match.
:param other: an extended regex expression
>>> df.filter(df.name.rlike('ice$')).collect()
[Row(age=2, name=u'Alice')]
"""
_like_doc = """
SQL like expression. Returns a boolean :class:`Column` based on a SQL LIKE match.
:param other: a SQL LIKE pattern
See :func:`rlike` for a regex version
>>> df.filter(df.name.like('Al%')).collect()
[Row(age=2, name=u'Alice')]
"""
_startswith_doc = """
String starts with. Returns a boolean :class:`Column` based on a string match.
:param other: string at start of line (do not use a regex `^`)
>>> df.filter(df.name.startswith('Al')).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter(df.name.startswith('^Al')).collect()
[]
"""
_endswith_doc = """
String ends with. Returns a boolean :class:`Column` based on a string match.
:param other: string at end of line (do not use a regex `$`)
>>> df.filter(df.name.endswith('ice')).collect()
[Row(age=2, name=u'Alice')]
>>> df.filter(df.name.endswith('ice$')).collect()
[]
"""
contains = ignore_unicode_prefix(_bin_op("contains", _contains_doc))
rlike = ignore_unicode_prefix(_bin_op("rlike", _rlike_doc))
like = ignore_unicode_prefix(_bin_op("like", _like_doc))
startswith = ignore_unicode_prefix(_bin_op("startsWith", _startswith_doc))
endswith = ignore_unicode_prefix(_bin_op("endsWith", _endswith_doc))
@ignore_unicode_prefix
@since(1.3)
def substr(self, startPos, length):
"""
Return a :class:`Column` which is a substring of the column.
:param startPos: start position (int or Column)
:param length: length of the substring (int or Column)
>>> df.select(df.name.substr(1, 3).alias("col")).collect()
[Row(col=u'Ali'), Row(col=u'Bob')]
"""
if type(startPos) != type(length):
raise TypeError(
"startPos and length must be the same type. "
"Got {startPos_t} and {length_t}, respectively."
.format(
startPos_t=type(startPos),
length_t=type(length),
))
if isinstance(startPos, int):
jc = self._jc.substr(startPos, length)
elif isinstance(startPos, Column):
jc = self._jc.substr(startPos._jc, length._jc)
else:
raise TypeError("Unexpected type: %s" % type(startPos))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def isin(self, *cols):
"""
A boolean expression that is evaluated to true if the value of this
expression is contained by the evaluated values of the arguments.
>>> df[df.name.isin("Bob", "Mike")].collect()
[Row(age=5, name=u'Bob')]
>>> df[df.age.isin([1, 2, 3])].collect()
[Row(age=2, name=u'Alice')]
"""
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
cols = [c._jc if isinstance(c, Column) else _create_column_from_literal(c) for c in cols]
sc = SparkContext._active_spark_context
jc = getattr(self._jc, "isin")(_to_seq(sc, cols))
return Column(jc)
# order
_asc_doc = """
Returns a sort expression based on ascending order of the column.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc()).collect()
[Row(name=u'Alice'), Row(name=u'Tom')]
"""
_asc_nulls_first_doc = """
Returns a sort expression based on ascending order of the column, and null values
return before non-null values.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc_nulls_first()).collect()
[Row(name=None), Row(name=u'Alice'), Row(name=u'Tom')]
.. versionadded:: 2.4
"""
_asc_nulls_last_doc = """
Returns a sort expression based on ascending order of the column, and null values
appear after non-null values.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc_nulls_last()).collect()
[Row(name=u'Alice'), Row(name=u'Tom'), Row(name=None)]
.. versionadded:: 2.4
"""
_desc_doc = """
Returns a sort expression based on the descending order of the column.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc()).collect()
[Row(name=u'Tom'), Row(name=u'Alice')]
"""
_desc_nulls_first_doc = """
Returns a sort expression based on the descending order of the column, and null values
appear before non-null values.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc_nulls_first()).collect()
[Row(name=None), Row(name=u'Tom'), Row(name=u'Alice')]
.. versionadded:: 2.4
"""
_desc_nulls_last_doc = """
Returns a sort expression based on the descending order of the column, and null values
appear after non-null values.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc_nulls_last()).collect()
[Row(name=u'Tom'), Row(name=u'Alice'), Row(name=None)]
.. versionadded:: 2.4
"""
asc = ignore_unicode_prefix(_unary_op("asc", _asc_doc))
asc_nulls_first = ignore_unicode_prefix(_unary_op("asc_nulls_first", _asc_nulls_first_doc))
asc_nulls_last = ignore_unicode_prefix(_unary_op("asc_nulls_last", _asc_nulls_last_doc))
desc = ignore_unicode_prefix(_unary_op("desc", _desc_doc))
desc_nulls_first = ignore_unicode_prefix(_unary_op("desc_nulls_first", _desc_nulls_first_doc))
desc_nulls_last = ignore_unicode_prefix(_unary_op("desc_nulls_last", _desc_nulls_last_doc))
_isNull_doc = """
True if the current expression is null.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(name=u'Tom', height=80), Row(name=u'Alice', height=None)])
>>> df.filter(df.height.isNull()).collect()
[Row(height=None, name=u'Alice')]
"""
_isNotNull_doc = """
True if the current expression is NOT null.
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(name=u'Tom', height=80), Row(name=u'Alice', height=None)])
>>> df.filter(df.height.isNotNull()).collect()
[Row(height=80, name=u'Tom')]
"""
isNull = ignore_unicode_prefix(_unary_op("isNull", _isNull_doc))
isNotNull = ignore_unicode_prefix(_unary_op("isNotNull", _isNotNull_doc))
@since(1.3)
def alias(self, *alias, **kwargs):
"""
Returns this column aliased with a new name or names (in the case of expressions that
return more than one column, such as explode).
:param alias: strings of desired column names (collects all positional arguments passed)
:param metadata: a dict of information to be stored in ``metadata`` attribute of the
corresponding :class: `StructField` (optional, keyword only argument)
.. versionchanged:: 2.2
Added optional ``metadata`` argument.
>>> df.select(df.age.alias("age2")).collect()
[Row(age2=2), Row(age2=5)]
>>> df.select(df.age.alias("age3", metadata={'max': 99})).schema['age3'].metadata['max']
99
"""
metadata = kwargs.pop('metadata', None)
        assert not kwargs, 'Unexpected kwargs were passed: %s' % kwargs
sc = SparkContext._active_spark_context
if len(alias) == 1:
if metadata:
jmeta = sc._jvm.org.apache.spark.sql.types.Metadata.fromJson(
json.dumps(metadata))
return Column(getattr(self._jc, "as")(alias[0], jmeta))
else:
return Column(getattr(self._jc, "as")(alias[0]))
else:
if metadata:
raise ValueError('metadata can only be provided for a single column')
return Column(getattr(self._jc, "as")(_to_seq(sc, list(alias))))
name = copy_func(alias, sinceversion=2.0, doc=":func:`name` is an alias for :func:`alias`.")
@ignore_unicode_prefix
@since(1.3)
def cast(self, dataType):
""" Convert the column into type ``dataType``.
>>> df.select(df.age.cast("string").alias('ages')).collect()
[Row(ages=u'2'), Row(ages=u'5')]
>>> df.select(df.age.cast(StringType()).alias('ages')).collect()
[Row(ages=u'2'), Row(ages=u'5')]
"""
if isinstance(dataType, basestring):
jc = self._jc.cast(dataType)
elif isinstance(dataType, DataType):
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
jdt = spark._jsparkSession.parseDataType(dataType.json())
jc = self._jc.cast(jdt)
else:
raise TypeError("unexpected type: %s" % type(dataType))
return Column(jc)
astype = copy_func(cast, sinceversion=1.4, doc=":func:`astype` is an alias for :func:`cast`.")
@since(1.3)
def between(self, lowerBound, upperBound):
"""
A boolean expression that is evaluated to true if the value of this
expression is between the given columns.
>>> df.select(df.name, df.age.between(2, 4)).show()
+-----+---------------------------+
| name|((age >= 2) AND (age <= 4))|
+-----+---------------------------+
|Alice| true|
| Bob| false|
+-----+---------------------------+
"""
return (self >= lowerBound) & (self <= upperBound)
@since(1.4)
def when(self, condition, value):
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> from pyspark.sql import functions as F
>>> df.select(df.name, F.when(df.age > 4, 1).when(df.age < 3, -1).otherwise(0)).show()
+-----+------------------------------------------------------------+
| name|CASE WHEN (age > 4) THEN 1 WHEN (age < 3) THEN -1 ELSE 0 END|
+-----+------------------------------------------------------------+
|Alice| -1|
| Bob| 1|
+-----+------------------------------------------------------------+
"""
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = self._jc.when(condition._jc, v)
return Column(jc)
@since(1.4)
def otherwise(self, value):
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
See :func:`pyspark.sql.functions.when` for example usage.
:param value: a literal value, or a :class:`Column` expression.
>>> from pyspark.sql import functions as F
>>> df.select(df.name, F.when(df.age > 3, 1).otherwise(0)).show()
+-----+-------------------------------------+
| name|CASE WHEN (age > 3) THEN 1 ELSE 0 END|
+-----+-------------------------------------+
|Alice| 0|
| Bob| 1|
+-----+-------------------------------------+
"""
v = value._jc if isinstance(value, Column) else value
jc = self._jc.otherwise(v)
return Column(jc)
@since(1.4)
def over(self, window):
"""
Define a windowing column.
:param window: a :class:`WindowSpec`
:return: a Column
>>> from pyspark.sql import Window
>>> window = Window.partitionBy("name").orderBy("age").rowsBetween(-1, 1)
>>> from pyspark.sql.functions import rank, min
>>> # df.select(rank().over(window), min('age').over(window))
"""
from pyspark.sql.window import WindowSpec
if not isinstance(window, WindowSpec):
raise TypeError("window should be WindowSpec")
jc = self._jc.over(window._jspec)
return Column(jc)
def __nonzero__(self):
raise ValueError("Cannot convert column into bool: please use '&' for 'and', '|' for 'or', "
"'~' for 'not' when building DataFrame boolean expressions.")
__bool__ = __nonzero__
def __repr__(self):
return 'Column<%s>' % self._jc.toString().encode('utf8')
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.column
globs = pyspark.sql.column.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.column tests")\
.getOrCreate()
sc = spark.sparkContext
globs['spark'] = spark
globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
.toDF(StructType([StructField('age', IntegerType()),
StructField('name', StringType())]))
(failure_count, test_count) = doctest.testmod(
pyspark.sql.column, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | -3,007,225,937,365,343,000 | 34.047619 | 100 | 0.55163 | false |
ivoire/DataTag | DataTag/config.py | 1 | 5663 | # -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2015 Rémi Duraffort
# This file is part of DataTag.
#
# DataTag is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DataTag is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with DataTag. If not, see <http://www.gnu.org/licenses/>
from __future__ import unicode_literals
import yaml
class CategoryConf(object):
def __init__(self, name, description):
self.name = name
self.description = description
class MediaConf(object):
def __init__(self, pattern, tags, description):
self.pattern = pattern
self.tags = tags
self.description = description
class TagConf(object):
def __init__(self, name, description, shortname, groups, category, public):
self.name = name
self.description = description
self.shortname = shortname
self.groups = groups
self.category = category
self.public = public
class Configuration(object):
def __init__(self):
self.medias = []
self.tags = {}
self.categories = {}
self.exclude = []
self.default_groups = []
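    # The YAML configuration parsed by load() is expected to look roughly like this
    # (hypothetical example):
    #
    #   medias:
    #     - pattern: ["holidays/*.jpg"]
    #       tags: [holidays]
    #       description: Holiday pictures
    #   tags:
    #     holidays:
    #       description: Holiday pictures
    #       shortname: holidays
    #       groups: [family]
    #       category: events
    #       public: false
    #   categories:
    #     events:
    #       description: Events
    #   exclude: ["*.tmp"]
    #   defaults:
    #     groups: [family]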
def load(self, filename):
try:
# Load the configuration file
with open(filename, 'r') as fin:
                # safe_load is sufficient for this config and avoids constructing arbitrary objects
                y_conf = yaml.safe_load(fin)
# Load the medias
for media in y_conf.get('medias', []):
pattern = media['pattern']
if not isinstance(pattern, list):
pattern = [pattern]
self.medias.append(MediaConf(pattern,
media.get('tags', []),
media.get('description', None)))
# Load the tags
tags = y_conf.get('tags', {})
for tag_name in tags:
tag = tags[tag_name]
self.tags[tag_name] = TagConf(tag_name,
tag.get('description', None),
tag.get('shortname', None),
set(tag.get('groups', [])),
tag.get('category', None),
tag.get('public', False))
# Load categories
categories = y_conf.get('categories', {})
for category_name in categories:
category = categories[category_name]
self.categories[category_name] = CategoryConf(
category_name,
category.get('description', None))
# Load excludes and default groups
for exclude in y_conf.get('exclude', []):
self.exclude.append(exclude)
for group_name in y_conf.get('defaults', {}).get('groups', []):
self.default_groups.append(group_name)
except IOError:
pass
def media_tags(self):
tags = set()
for pattern in self.medias:
tags.update(pattern.tags)
return tags
def tag_set(self):
return set(self.tags.keys())
def dump(self, filename):
medias = []
tags = {}
categories = {}
# Create the list of media dicts
for media in self.medias:
new_media = {'pattern': media.pattern}
if media.tags:
new_media['tags'] = media.tags
if media.description:
new_media['description'] = media.description
medias.append(new_media)
# Create the list of categories
for cat_name in self.categories:
cat = self.categories[cat_name]
categories[cat_name] = {}
if cat.description:
categories[cat_name]['description'] = cat.description
# Create the list of tags dict
for tag_name in self.tags:
tag = self.tags[tag_name]
tags[tag.name] = {}
if tag.description:
tags[tag.name]['description'] = tag.description
if tag.shortname:
tags[tag.name]['shortname'] = tag.shortname
if tag.groups:
tags[tag.name]['groups'] = list(tag.groups)
if tag.category:
tags[tag.name]['category'] = tag.category
if tag.public:
tags[tag.name]['public'] = True
# Create the final dict
to_dump = {}
if medias:
to_dump['medias'] = medias
if categories:
to_dump['categories'] = categories
if tags:
to_dump['tags'] = tags
if self.exclude:
to_dump['exclude'] = self.exclude
if self.default_groups:
to_dump['defaults'] = dict()
to_dump['defaults']['groups'] = self.default_groups
with open(filename, 'w') as fout:
yaml.dump(to_dump, fout,
default_flow_style=False, default_style=None, indent=1)
| agpl-3.0 | 16,048,968,638,121,734 | 35.063694 | 90 | 0.513599 | false |
dmarteau/QGIS | python/plugins/processing/algs/gdal/sieve.py | 15 | 5672 | # -*- coding: utf-8 -*-
"""
***************************************************************************
sieve.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterNumber,
QgsProcessingParameterBoolean,
QgsProcessingParameterString,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class sieve(GdalAlgorithm):
INPUT = 'INPUT'
THRESHOLD = 'THRESHOLD'
EIGHT_CONNECTEDNESS = 'EIGHT_CONNECTEDNESS'
NO_MASK = 'NO_MASK'
MASK_LAYER = 'MASK_LAYER'
EXTRA = 'EXTRA'
OUTPUT = 'OUTPUT'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer')))
self.addParameter(QgsProcessingParameterNumber(self.THRESHOLD,
self.tr('Threshold'),
type=QgsProcessingParameterNumber.Integer,
minValue=0,
defaultValue=10))
self.addParameter(QgsProcessingParameterBoolean(self.EIGHT_CONNECTEDNESS,
self.tr('Use 8-connectedness'),
defaultValue=False))
self.addParameter(QgsProcessingParameterBoolean(self.NO_MASK,
self.tr('Do not use the default validity mask for the input band'),
defaultValue=False))
self.addParameter(QgsProcessingParameterRasterLayer(self.MASK_LAYER,
self.tr('Validity mask'),
optional=True))
extra_param = QgsProcessingParameterString(self.EXTRA,
self.tr('Additional command-line parameters'),
defaultValue=None,
optional=True)
extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
self.addParameter(extra_param)
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Sieved')))
def name(self):
return 'sieve'
def displayName(self):
return self.tr('Sieve')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'sieve.png'))
def commandName(self):
return 'gdal_sieve'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
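        # Assembles the gdal_sieve command line; a typical (hypothetical) invocation is:
        #   gdal_sieve.py -st 10 -4 -of GTiff /path/to/input.tif /path/to/sieved.tif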
arguments = [
'-st',
str(self.parameterAsInt(parameters, self.THRESHOLD, context)),
]
if self.parameterAsBoolean(parameters, self.EIGHT_CONNECTEDNESS, context):
arguments.append('-8')
else:
arguments.append('-4')
if self.parameterAsBoolean(parameters, self.NO_MASK, context):
arguments.append('-nomask')
mask = self.parameterAsRasterLayer(parameters, self.MASK_LAYER, context)
if mask:
arguments.append('-mask')
arguments.append(mask.source())
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
self.setOutputValue(self.OUTPUT, out)
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
extra = self.parameterAsString(parameters, self.EXTRA, context)
arguments.append(extra)
raster = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if raster is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
arguments.append(raster.source())
arguments.append(out)
return [self.commandName() + ('.bat' if isWindows() else '.py'), GdalUtils.escapeAndJoin(arguments)]
| gpl-2.0 | 3,877,685,800,800,242,700 | 41.328358 | 123 | 0.530148 | false |
stefanhenneking/mxnet | python/mxnet/executor_manager.py | 38 | 17449 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-locals, too-many-arguments, too-many-statements
"""Executor manager."""
from __future__ import absolute_import
import logging
import numpy as np
from .base import mx_real_t
from . import ndarray as nd
from .context import cpu
from .io import DataDesc
def _split_input_slice(batch_size, work_load_list):
"""Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
In case of too many splits, leading to some empty slices.
"""
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
begin = int(min((end, batch_size)))
end = int(min((begin + batch_num, batch_size)))
if begin >= end:
raise ValueError('Too many slices. Some splits are empty.')
slices.append(slice(begin, end))
return slices
def _check_arguments(symbol):
"""Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
"""
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
if name in arg_set:
raise ValueError(('Find duplicated argument name \"%s\", ' +
                              'please make the weight name non-duplicated (using name arguments), ' +
'arguments are %s') % (name, str(arg_names)))
arg_set.add(name)
aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
if name in aux_set:
raise ValueError(
('Find duplicated auxiliary param name \"%s\", ' +
                 'please make the weight name non-duplicated (using name arguments), ' +
'arguments are %s, auxiliary params are %s'
) % (name, str(arg_names), str(aux_names)))
aux_set.add(name)
def _load_general(data, targets):
"""Load a list of arrays into a list of arrays specified by slices."""
for d_src, d_targets in zip(data, targets):
if isinstance(d_targets, nd.NDArray):
d_src.copyto(d_targets)
else:
assert d_targets[-1][0].stop == d_src.shape[0], \
"Batch size miss match. Expected %d, got %d"%( \
d_targets[-1][0].stop, d_src.shape[0])
for slice_idx, d_dst in d_targets:
d_src[slice_idx].copyto(d_dst)
def _load_data(batch, targets):
"""Load data into sliced arrays."""
_load_general(batch.data, targets)
def _load_label(batch, targets):
"""Load label into sliced arrays."""
_load_general(batch.label, targets)
# pylint: disable=too-many-branches
def _bind_exec(sym, ctx, input_shapes, param_names, need_grad=False,
base_exec=None, shared_data_arrays=None, input_types=None, logger=logging):
"""bind executor for bucketing, potentially sharing data with an existing executor."""
arg_shape, _, aux_shape = sym.infer_shape(**input_shapes)
assert(arg_shape is not None)
if input_types is None:
input_types = {k: mx_real_t for k in input_shapes.keys()}
arg_types, _, aux_types = sym.infer_type(**input_types)
assert(arg_types is not None)
arg_arrays = []
grad_arrays = {} if need_grad != False else None
arg_names = sym.list_arguments()
if need_grad is False:
need_grad = set()
elif need_grad is True:
need_grad = set(arg_names) - set(input_shapes.keys())
elif isinstance(need_grad, set):
pass
else:
raise AssertionError("need_grad must be boolean or set.")
grad_req = {name:('write' if name in need_grad else 'null') for name in arg_names}
# create or borrow arguments and gradients
for i, name in enumerate(arg_names):
if not name in param_names:
# data or label
if shared_data_arrays is not None and \
name in shared_data_arrays:
arg_arr = shared_data_arrays[name]
if np.prod(arg_arr.shape) >= np.prod(arg_shape[i]):
# good, we can share this memory
assert(arg_types[i] == arg_arr.dtype)
arg_arr = arg_arr.reshape(arg_shape[i])
else:
logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape[i])) +
(', which is larger than already allocated ') +
('shape %s' % (arg_arr.shape,)) +
('. Need to re-allocate. Consider putting ') +
('default_bucket_key to be the bucket taking the largest ') +
('input for better memory sharing.'))
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
# replace existing shared array because the new one is bigger
shared_data_arrays[name] = arg_arr
else:
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
if shared_data_arrays is not None:
shared_data_arrays[name] = arg_arr
arg_arrays.append(arg_arr)
else:
# model parameter
if base_exec is None:
arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
if name in need_grad:
grad_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
grad_arrays[name] = grad_arr
else:
arg_arr = base_exec.arg_dict[name]
assert arg_arr.shape == arg_shape[i]
assert arg_arr.dtype == arg_types[i]
if name in need_grad:
grad_arrays[name] = base_exec.grad_dict[name]
arg_arrays.append(arg_arr)
# create or borrow aux variables
if base_exec is None:
aux_arrays = [nd.zeros(s, ctx, dtype=t) for s, t in zip(aux_shape, aux_types)]
else:
for i, a in enumerate(base_exec.aux_arrays):
assert aux_shape[i] == a.shape
assert aux_types[i] == a.dtype
aux_arrays = [a for a in base_exec.aux_arrays]
executor = sym.bind(ctx=ctx, args=arg_arrays, args_grad=grad_arrays,
aux_states=aux_arrays,
grad_req=grad_req, shared_exec=base_exec)
return executor
class DataParallelExecutorGroup(object):
"""A group of executors living on different devices, for data parallelization.
Parameters
----------
sym: Symbol
The network configuration.
arg_names: list of str
Equals `sym.list_arguments()`
param_names: list of str
List of names of all trainable parameters.
ctx: list of Context
List of devices for training (data parallelization).
slices: list of int
Describes how the data parallelization splits data into different devices.
train_data: DataIter (or DataBatch)
The dataset for training. It could be any object with `provide_data` and
`provide_label` properties. Loading of actual data is not necessarily needed
at this stage.
    shared_group: DataParallelExecutorGroup
An existing executor group, if to share parameters with it.
"""
def __init__(self, sym, arg_names, param_names, ctx, slices, train_data, shared_group=None):
# make sure the architecture is valid
_check_arguments(sym)
if shared_group is None:
self.shared_data_arrays = [{} for _ in ctx]
else:
self.shared_data_arrays = shared_group.shared_data_arrays
self.data_names = [x[0] for x in train_data.provide_data]
self.label_names = [x[0] for x in train_data.provide_label]
self.aux_names = sym.list_auxiliary_states()
self.param_idx = [i for i in range(len(arg_names)) if arg_names[i] in param_names]
self.param_names = [arg_names[i] for i in self.param_idx]
self.train_execs = []
for i, ctxi in enumerate(ctx):
data_shapes = {}
data_types = {}
for x in train_data.provide_data + train_data.provide_label:
data_shapes[x[0]] = tuple([slices[i].stop - slices[i].start] + list(x[1][1:]))
if isinstance(x, DataDesc):
data_types[x.name] = x.dtype
else:
data_types[x[0]] = mx_real_t
shared_exec = None if shared_group is None else shared_group.train_execs[i]
train_exec = _bind_exec(sym, ctxi, data_shapes, self.param_names,
need_grad=True, base_exec=shared_exec,
shared_data_arrays=self.shared_data_arrays[i],
input_types=data_types)
self.train_execs.append(train_exec)
# data structure
self.data_arrays = [[(slices[i], e.arg_dict[name]) for i, e in enumerate(self.train_execs)]
for name in self.data_names]
self.label_arrays = [[(slices[i], e.arg_dict[name]) for i, e in enumerate(self.train_execs)]
for name in self.label_names]
self.param_arrays = [[e.arg_arrays[i] for e in self.train_execs]
for i in self.param_idx]
self.grad_arrays = [[e.grad_arrays[i] for e in self.train_execs]
for i in self.param_idx]
self.aux_arrays = [[e.aux_arrays[i] for e in self.train_execs]
for i in range(len(self.aux_names))]
self.slices = slices
def load_data_batch(self, data_batch):
"""Load data and labels into arrays."""
_load_data(data_batch, self.data_arrays)
_load_label(data_batch, self.label_arrays)
def forward(self, is_train=False):
"""Perform a forward pass on each executor."""
for texec in self.train_execs:
texec.forward(is_train=is_train)
def backward(self):
"""Perform a backward pass on each executor."""
for texec in self.train_execs:
texec.backward()
def update_metric(self, metric, labels):
"""Update evaluation metric with label and current outputs."""
for texec, islice in zip(self.train_execs, self.slices):
labels_slice = [label[islice] for label in labels]
metric.update(labels_slice, texec.outputs)
class DataParallelExecutorManager(object):
""" Helper class to manage multiple executors for data parallelism.
Parameters
----------
symbol : Symbol
Output symbol.
ctx : list of Context
Devices to run on.
param_names: list of str
Name of all trainable parameters of the network.
arg_names: list of str
Name of all arguments of the network.
aux_names: list of str
Name of all auxiliary states of the network.
train_data : DataIter
Training data iterator.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ctx.
logger : logging logger
When not specified, default logger will be used.
sym_gen : A function that generate new Symbols depending on different
input shapes. Used only for bucketing.
"""
def __init__(self, symbol, ctx, train_data,
arg_names, param_names, aux_names,
work_load_list=None, logger=None, sym_gen=None):
if logger is None:
logger = logging
# preparation
num_device = len(ctx)
logger.info('Start training with %s', str(ctx))
if work_load_list is None:
work_load_list = [1] * num_device
assert isinstance(work_load_list, list) and len(work_load_list) == num_device, \
"Invalid settings for work load. "
slices = _split_input_slice(train_data.batch_size, work_load_list)
self.slices = slices
self.arg_names = arg_names
self.param_names = param_names
self.aux_names = aux_names
self.ctx = ctx
self.execgrp = DataParallelExecutorGroup(symbol, self.arg_names, self.param_names, self.ctx,
self.slices, train_data)
self.symbol = symbol
self.sym_gen = sym_gen
self.curr_execgrp = None # this is set when data is loaded
if self.sym_gen is not None:
self.execgrp_bucket = {train_data.default_bucket_key: self.execgrp}
def install_monitor(self, monitor):
"""Install monitor on all executors."""
if self.sym_gen is not None:
raise NotImplementedError("Monitoring is not implemented for bucketing")
for train_exec in self.execgrp.train_execs:
monitor.install(train_exec)
def set_params(self, arg_params, aux_params):
"""Set parameter and aux values.
Parameters
----------
arg_params : list of NDArray
Source parameter arrays
aux_params : list of NDArray
Source aux arrays.
"""
for texec in self.execgrp.train_execs:
texec.copy_params_from(arg_params, aux_params)
def copy_to(self, arg_params, aux_params):
""" Copy data from each executor to ```arg_params`` and ``aux_params``.
Parameters
----------
arg_params : list of NDArray
Target parameter arrays.
aux_params : list of NDArray
Target aux arrays.
Notes
-----
- This function will inplace update the NDArrays in arg_params and aux_params.
"""
for name, block in zip(self.param_names, self.param_arrays):
weight = sum(w.copyto(cpu()) for w in block) / len(block)
weight.astype(arg_params[name].dtype).copyto(arg_params[name])
for name, block in zip(self.aux_names, self.aux_arrays):
weight = sum(w.copyto(cpu()) for w in block) / len(block)
weight.astype(aux_params[name].dtype).copyto(aux_params[name])
@property
def param_arrays(self):
"""Shared parameter arrays."""
# param arrays should be shared by all executor groups
return self.execgrp.param_arrays
@property
def grad_arrays(self):
"""Shared gradient arrays."""
# grad arrays should be shared by all executor groups
return self.execgrp.grad_arrays
@property
def aux_arrays(self):
"""Shared aux states."""
# aux arrays are also shared by all executor groups
return self.execgrp.aux_arrays
def load_data_batch(self, data_batch):
"""Load data and labels into arrays."""
if self.sym_gen is not None:
key = data_batch.bucket_key
if key not in self.execgrp_bucket:
# create new bucket entry
symbol = self.sym_gen(key)
execgrp = DataParallelExecutorGroup(symbol, self.arg_names,
self.param_names, self.ctx,
self.slices, data_batch,
shared_group=self.execgrp)
self.execgrp_bucket[key] = execgrp
self.curr_execgrp = self.execgrp_bucket[key]
else:
self.curr_execgrp = self.execgrp
self.curr_execgrp.load_data_batch(data_batch)
def forward(self, is_train=False):
"""Run forward on the current executor."""
self.curr_execgrp.forward(is_train=is_train)
def backward(self):
"""Run backward on the current executor."""
self.curr_execgrp.backward()
def update_metric(self, metric, labels):
"""Update metric with the current executor."""
self.curr_execgrp.update_metric(metric, labels)
| apache-2.0 | 97,544,107,653,947,900 | 38.566893 | 106 | 0.586165 | false |
jayceyxc/hue | desktop/core/ext-py/Django-1.6.10/tests/datetimes/tests.py | 49 | 3213 | from __future__ import absolute_import
import datetime
try:
import pytz
except ImportError:
pytz = None
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import timezone
from django.utils.unittest import skipIf
from .models import Article, Comment, Category
class DateTimesTests(TestCase):
def test_related_model_traverse(self):
a1 = Article.objects.create(
title="First one",
pub_date=datetime.datetime(2005, 7, 28, 9, 0, 0),
)
a2 = Article.objects.create(
title="Another one",
pub_date=datetime.datetime(2010, 7, 28, 10, 0, 0),
)
a3 = Article.objects.create(
title="Third one, in the first day",
pub_date=datetime.datetime(2005, 7, 28, 17, 0, 0),
)
a1.comments.create(
text="Im the HULK!",
pub_date=datetime.datetime(2005, 7, 28, 9, 30, 0),
)
a1.comments.create(
text="HULK SMASH!",
pub_date=datetime.datetime(2005, 7, 29, 1, 30, 0),
)
a2.comments.create(
text="LMAO",
pub_date=datetime.datetime(2010, 7, 28, 10, 10, 10),
)
a3.comments.create(
text="+1",
pub_date=datetime.datetime(2005, 8, 29, 10, 10, 10),
)
c = Category.objects.create(name="serious-news")
c.articles.add(a1, a3)
self.assertQuerysetEqual(
Comment.objects.datetimes("article__pub_date", "year"), [
datetime.datetime(2005, 1, 1),
datetime.datetime(2010, 1, 1),
],
lambda d: d,
)
self.assertQuerysetEqual(
Comment.objects.datetimes("article__pub_date", "month"), [
datetime.datetime(2005, 7, 1),
datetime.datetime(2010, 7, 1),
],
lambda d: d
)
self.assertQuerysetEqual(
Comment.objects.datetimes("article__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
datetime.datetime(2010, 7, 28),
],
lambda d: d
)
self.assertQuerysetEqual(
Article.objects.datetimes("comments__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
datetime.datetime(2005, 7, 29),
datetime.datetime(2005, 8, 29),
datetime.datetime(2010, 7, 28),
],
lambda d: d
)
self.assertQuerysetEqual(
Article.objects.datetimes("comments__approval_date", "day"), []
)
self.assertQuerysetEqual(
Category.objects.datetimes("articles__pub_date", "day"), [
datetime.datetime(2005, 7, 28),
],
lambda d: d,
)
@skipIf(pytz is None, "this test requires pytz")
@override_settings(USE_TZ=True)
def test_21432(self):
now = timezone.localtime(timezone.now().replace(microsecond=0))
Article.objects.create(title="First one", pub_date=now)
qs = Article.objects.datetimes('pub_date', 'second')
self.assertEqual(qs[0], now)
| apache-2.0 | -7,972,315,571,877,068,000 | 31.13 | 75 | 0.540305 | false |
2ndQuadrant/ansible | lib/ansible/modules/cloud/google/gcp_container_cluster_facts.py | 5 | 17700 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_container_cluster_facts
description:
- Gather facts for GCP Cluster
short_description: Gather facts for GCP Cluster
version_added: 2.8
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
location:
description:
- The location where the cluster is deployed.
required: true
aliases:
- region
- zone
version_added: 2.8
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a cluster facts"
gcp_container_cluster_facts:
location: us-central1-a
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
name:
description:
- The name of this cluster. The name must be unique within this project and
location, and can be up to 40 characters. Must be Lowercase letters, numbers,
and hyphens only. Must start with a letter. Must end with a number or a letter.
returned: success
type: str
description:
description:
- An optional description of this cluster.
returned: success
type: str
initialNodeCount:
description:
- The number of nodes to create in this cluster. You must ensure that your Compute
Engine resource quota is sufficient for this number of instances. You must
also have available firewall and routes quota. For requests, this field should
only be used in lieu of a "nodePool" object, since this configuration (along
with the "nodeConfig") will be used to create a "NodePool" object with an
auto-generated name. Do not use this and a nodePool at the same time.
returned: success
type: int
nodeConfig:
description:
- Parameters used in creating the cluster's nodes.
- For requests, this field should only be used in lieu of a "nodePool" object,
since this configuration (along with the "initialNodeCount") will be used
to create a "NodePool" object with an auto-generated name. Do not use this
and a nodePool at the same time. For responses, this field will be populated
with the node configuration of the first node pool. If unspecified, the defaults
are used.
returned: success
type: complex
contains:
machineType:
description:
- The name of a Google Compute Engine machine type (e.g.
- n1-standard-1). If unspecified, the default machine type is n1-standard-1.
returned: success
type: str
diskSizeGb:
description:
- Size of the disk attached to each node, specified in GB. The smallest
allowed disk size is 10GB. If unspecified, the default disk size is 100GB.
returned: success
type: int
oauthScopes:
description:
- The set of Google API scopes to be made available on all of the node VMs
under the "default" service account.
- 'The following scopes are recommended, but not required, and by default
are not included: U(https://www.googleapis.com/auth/compute) is required
for mounting persistent storage on your nodes.'
- U(https://www.googleapis.com/auth/devstorage.read_only) is required for
communicating with gcr.io (the Google Container Registry).
- If unspecified, no scopes are added, unless Cloud Logging or Cloud Monitoring
are enabled, in which case their required scopes will be added.
returned: success
type: list
serviceAccount:
description:
- The Google Cloud Platform Service Account to be used by the node VMs.
If no Service Account is specified, the "default" service account is used.
returned: success
type: str
metadata:
description:
- The metadata key/value pairs assigned to instances in the cluster.
- 'Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
in length. These are reflected as part of a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with any other
metadata keys for the project or be one of the four reserved keys: "instance-template",
"kube-env", "startup-script", and "user-data" Values are free-form strings,
and only have meaning as interpreted by the image running in the instance.
The only restriction placed on them is that each value''s size must be
less than or equal to 32 KB.'
- The total size of all keys and values must be less than 512 KB.
- 'An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
imageType:
description:
- The image type to use for this node. Note that for a given image type,
the latest version of it will be used.
returned: success
type: str
labels:
description:
- 'The map of Kubernetes labels (key/value pairs) to be applied to each
node. These will added in addition to any default label(s) that Kubernetes
may apply to the node. In case of conflict in label keys, the applied
set may differ depending on the Kubernetes version -- it''s best to assume
the behavior is undefined and conflicts should be avoided. For more information,
including usage and the valid values, see: U(http://kubernetes.io/v1.1/docs/user-guide/labels.html)
An object containing a list of "key": value pairs.'
- 'Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }.'
returned: success
type: dict
localSsdCount:
description:
- The number of local SSD disks to be attached to the node.
- 'The limit for this value is dependant upon the maximum number of disks
available on a machine per zone. See: U(https://cloud.google.com/compute/docs/disks/local-ssd#local_ssd_limits)
for more information.'
returned: success
type: int
tags:
description:
- The list of instance tags applied to all nodes. Tags are used to identify
valid sources or targets for network firewalls and are specified by the
client during cluster or node pool creation. Each tag within the list
must comply with RFC1035.
returned: success
type: list
preemptible:
description:
- 'Whether the nodes are created as preemptible VM instances. See: U(https://cloud.google.com/compute/docs/instances/preemptible)
for more information about preemptible VM instances.'
returned: success
type: bool
masterAuth:
description:
- The authentication information for accessing the master endpoint.
returned: success
type: complex
contains:
username:
description:
- The username to use for HTTP basic authentication to the master endpoint.
returned: success
type: str
password:
description:
- The password to use for HTTP basic authentication to the master endpoint.
Because the master endpoint is open to the Internet, you should create
a strong password.
returned: success
type: str
clusterCaCertificate:
description:
- Base64-encoded public certificate that is the root of trust for the cluster.
returned: success
type: str
clientCertificate:
description:
- Base64-encoded public certificate used by clients to authenticate to the
cluster endpoint.
returned: success
type: str
clientKey:
description:
- Base64-encoded private key used by clients to authenticate to the cluster
endpoint.
returned: success
type: str
loggingService:
description:
- 'The logging service the cluster should use to write logs. Currently available
options: logging.googleapis.com - the Google Cloud Logging service.'
- none - no logs will be exported from the cluster.
- if left as an empty string,logging.googleapis.com will be used.
returned: success
type: str
monitoringService:
description:
- The monitoring service the cluster should use to write metrics.
- 'Currently available options: monitoring.googleapis.com - the Google Cloud
Monitoring service.'
- none - no metrics will be exported from the cluster.
- if left as an empty string, monitoring.googleapis.com will be used.
returned: success
type: str
network:
description:
- The name of the Google Compute Engine network to which the cluster is connected.
If left unspecified, the default network will be used.
returned: success
type: str
privateClusterConfig:
description:
- Configuration for a private cluster.
returned: success
type: complex
contains:
enablePrivateNodes:
description:
- Whether nodes have internal IP addresses only. If enabled, all nodes are
given only RFC 1918 private addresses and communicate with the master
via private networking.
returned: success
type: bool
enablePrivateEndpoint:
description:
- Whether the master's internal IP address is used as the cluster endpoint.
returned: success
type: bool
masterIpv4CidrBlock:
description:
- The IP range in CIDR notation to use for the hosted master network. This
range will be used for assigning internal IP addresses to the master or
set of masters, as well as the ILB VIP. This range must not overlap with
any other ranges in use within the cluster's network.
returned: success
type: str
privateEndpoint:
description:
- The internal IP address of this cluster's master endpoint.
returned: success
type: str
publicEndpoint:
description:
- The external IP address of this cluster's master endpoint.
returned: success
type: str
clusterIpv4Cidr:
description:
- The IP address range of the container pods in this cluster, in CIDR notation
(e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify
a /14 block in 10.0.0.0/8.
returned: success
type: str
addonsConfig:
description:
- Configurations for the various addons available to run in the cluster.
returned: success
type: complex
contains:
httpLoadBalancing:
description:
- Configuration for the HTTP (L7) load balancing controller addon, which
makes it easy to set up HTTP load balancers for services in a cluster.
returned: success
type: complex
contains:
disabled:
description:
- Whether the HTTP Load Balancing controller is enabled in the cluster.
When enabled, it runs a small pod in the cluster that manages the
load balancers.
returned: success
type: bool
horizontalPodAutoscaling:
description:
- Configuration for the horizontal pod autoscaling feature, which increases
or decreases the number of replica pods a replication controller has based
on the resource usage of the existing pods.
returned: success
type: complex
contains:
disabled:
description:
- Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.
When enabled, it ensures that a Heapster pod is running in the cluster,
which is also used by the Cloud Monitoring service.
returned: success
type: bool
subnetwork:
description:
- The name of the Google Compute Engine subnetwork to which the cluster is connected.
returned: success
type: str
endpoint:
description:
- The IP address of this cluster's master endpoint.
- The endpoint can be accessed from the internet at https://username:password@endpoint/
See the masterAuth property of this resource for username and password information.
returned: success
type: str
initialClusterVersion:
description:
- The software version of the master endpoint and kubelets used in the cluster
when it was first created. The version can be upgraded over time.
returned: success
type: str
currentMasterVersion:
description:
- The current software version of the master endpoint.
returned: success
type: str
currentNodeVersion:
description:
- The current version of the node software components. If they are currently
at multiple versions because they're in the process of being upgraded, this
reflects the minimum version of all nodes.
returned: success
type: str
createTime:
description:
- The time the cluster was created, in RFC3339 text format.
returned: success
type: str
nodeIpv4CidrSize:
description:
- The size of the address space on each node for hosting containers.
- This is provisioned from within the container_ipv4_cidr range.
returned: success
type: int
servicesIpv4Cidr:
description:
- The IP address range of the Kubernetes services in this cluster, in CIDR notation
(e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from
the container CIDR.
returned: success
type: str
currentNodeCount:
description:
- The number of nodes currently in the cluster.
returned: success
type: int
expireTime:
description:
- The time the cluster will be automatically deleted in RFC3339 text format.
returned: success
type: str
location:
description:
- The location where the cluster is deployed.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(location=dict(required=True, type='str', aliases=['region', 'zone'])))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
items = fetch_list(module, collection(module))
if items.get('clusters'):
items = items.get('clusters')
else:
items = []
return_value = {'items': items}
module.exit_json(**return_value)
def collection(module):
return "https://container.googleapis.com/v1/projects/{project}/locations/{location}/clusters".format(**module.params)
def fetch_list(module, link):
auth = GcpSession(module, 'container')
response = auth.get(link)
return return_if_object(module, response)
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 | -7,183,793,086,362,560,000 | 38.420935 | 139 | 0.61678 | false |
0x726d77/storm | storm-client/src/py/storm/DistributedRPC.py | 22 | 9644 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import logging
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def execute(self, functionName, funcArgs):
"""
Parameters:
- functionName
- funcArgs
"""
self.send_execute(functionName, funcArgs)
return self.recv_execute()
def send_execute(self, functionName, funcArgs):
self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid)
args = execute_args()
args.functionName = functionName
args.funcArgs = funcArgs
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_execute(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = execute_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.e is not None:
raise result.e
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "execute failed: unknown result")
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["execute"] = Processor.process_execute
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_execute(self, seqid, iprot, oprot):
args = execute_args()
args.read(iprot)
iprot.readMessageEnd()
result = execute_result()
try:
result.success = self._handler.execute(args.functionName, args.funcArgs)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except DRPCExecutionException as e:
msg_type = TMessageType.REPLY
result.e = e
except AuthorizationException as aze:
msg_type = TMessageType.REPLY
result.aze = aze
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("execute", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class execute_args:
"""
Attributes:
- functionName
- funcArgs
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'functionName', None, None, ), # 1
(2, TType.STRING, 'funcArgs', None, None, ), # 2
)
def __init__(self, functionName=None, funcArgs=None,):
self.functionName = functionName
self.funcArgs = funcArgs
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.functionName = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.funcArgs = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_args')
if self.functionName is not None:
oprot.writeFieldBegin('functionName', TType.STRING, 1)
oprot.writeString(self.functionName.encode('utf-8'))
oprot.writeFieldEnd()
if self.funcArgs is not None:
oprot.writeFieldBegin('funcArgs', TType.STRING, 2)
oprot.writeString(self.funcArgs.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.functionName)
value = (value * 31) ^ hash(self.funcArgs)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class execute_result:
"""
Attributes:
- success
- e
- aze
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
(1, TType.STRUCT, 'e', (DRPCExecutionException, DRPCExecutionException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 2
)
def __init__(self, success=None, e=None, aze=None,):
self.success = success
self.e = e
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = DRPCExecutionException()
self.e.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('execute_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success.encode('utf-8'))
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 2)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.e)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| apache-2.0 | -5,960,140,756,515,216,000 | 30.009646 | 188 | 0.656574 | false |
hfp/tensorflow-xsmm | tensorflow/contrib/eager/python/examples/l2hmc/l2hmc.py | 29 | 12269 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""L2HMC compatible with TensorFlow's eager execution.
Reference [Generalizing Hamiltonian Monte Carlo with Neural
Networks](https://arxiv.org/pdf/1711.09268.pdf)
Code adapted from the released TensorFlow graph implementation by original
authors https://github.com/brain-research/l2hmc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.l2hmc import neural_nets
class Dynamics(tf.keras.Model):
"""Dynamics engine of naive L2HMC sampler."""
def __init__(self,
x_dim,
minus_loglikelihood_fn,
n_steps=25,
eps=.1,
np_seed=1):
"""Initialization.
Args:
x_dim: dimensionality of observed data
minus_loglikelihood_fn: log-likelihood function of conditional probability
n_steps: number of leapfrog steps within each transition
eps: initial value learnable scale of step size
np_seed: Random seed for numpy; used to control sampled masks.
"""
super(Dynamics, self).__init__()
npr.seed(np_seed)
self.x_dim = x_dim
self.potential = minus_loglikelihood_fn
self.n_steps = n_steps
self._construct_time()
self._construct_masks()
self.position_fn = neural_nets.GenericNet(x_dim, factor=2.)
self.momentum_fn = neural_nets.GenericNet(x_dim, factor=1.)
self.eps = tf.Variable(
initial_value=eps, name="eps", dtype=tf.float32, trainable=True)
def apply_transition(self, position):
"""Propose a new state and perform the accept or reject step."""
# Simulate dynamics both forward and backward;
# Use sampled Bernoulli masks to compute the actual solutions
position_f, momentum_f, accept_prob_f = self.transition_kernel(
position, forward=True)
position_b, momentum_b, accept_prob_b = self.transition_kernel(
position, forward=False)
# Decide direction uniformly
batch_size = tf.shape(position)[0]
forward_mask = tf.cast(tf.random_uniform((batch_size,)) > .5, tf.float32)
backward_mask = 1. - forward_mask
# Obtain proposed states
position_post = (
forward_mask[:, None] * position_f +
backward_mask[:, None] * position_b)
momentum_post = (
forward_mask[:, None] * momentum_f +
backward_mask[:, None] * momentum_b)
# Probability of accepting the proposed states
accept_prob = forward_mask * accept_prob_f + backward_mask * accept_prob_b
# Accept or reject step
accept_mask = tf.cast(
accept_prob > tf.random_uniform(tf.shape(accept_prob)), tf.float32)
reject_mask = 1. - accept_mask
# Samples after accept/reject step
position_out = (
accept_mask[:, None] * position_post + reject_mask[:, None] * position)
return position_post, momentum_post, accept_prob, position_out
def transition_kernel(self, position, forward=True):
"""Transition kernel of augmented leapfrog integrator."""
lf_fn = self._forward_lf if forward else self._backward_lf
# Resample momentum
momentum = tf.random_normal(tf.shape(position))
position_post, momentum_post = position, momentum
sumlogdet = 0.
# Apply augmented leapfrog steps
for i in range(self.n_steps):
position_post, momentum_post, logdet = lf_fn(position_post, momentum_post,
i)
sumlogdet += logdet
accept_prob = self._compute_accept_prob(position, momentum, position_post,
momentum_post, sumlogdet)
return position_post, momentum_post, accept_prob
def _forward_lf(self, position, momentum, i):
"""One forward augmented leapfrog step. See eq (5-6) in paper."""
t = self._get_time(i)
mask, mask_inv = self._get_mask(i)
sumlogdet = 0.
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
mask, mask_inv)
sumlogdet += logdet
position, logdet = self._update_position_forward(position, momentum, t,
mask_inv, mask)
sumlogdet += logdet
momentum, logdet = self._update_momentum_forward(position, momentum, t)
sumlogdet += logdet
return position, momentum, sumlogdet
def _backward_lf(self, position, momentum, i):
"""One backward augmented leapfrog step. See Appendix A in paper."""
# Reversed index/sinusoidal time
t = self._get_time(self.n_steps - i - 1)
mask, mask_inv = self._get_mask(self.n_steps - i - 1)
sumlogdet = 0.
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
mask_inv, mask)
sumlogdet += logdet
position, logdet = self._update_position_backward(position, momentum, t,
mask, mask_inv)
sumlogdet += logdet
momentum, logdet = self._update_momentum_backward(position, momentum, t)
sumlogdet += logdet
return position, momentum, sumlogdet
def _update_momentum_forward(self, position, momentum, t):
"""Update v in the forward leapfrog step."""
grad = self.grad_potential(position)
scale, translation, transformed = self.momentum_fn([position, grad, t])
scale *= .5 * self.eps
transformed *= self.eps
momentum = (
momentum * tf.exp(scale) -
.5 * self.eps * (tf.exp(transformed) * grad - translation))
return momentum, tf.reduce_sum(scale, axis=1)
def _update_position_forward(self, position, momentum, t, mask, mask_inv):
"""Update x in the forward leapfrog step."""
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= self.eps
transformed *= self.eps
position = (
mask * position +
mask_inv * (position * tf.exp(scale) + self.eps *
(tf.exp(transformed) * momentum + translation)))
return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _update_momentum_backward(self, position, momentum, t):
"""Update v in the backward leapfrog step. Inverting the forward update."""
grad = self.grad_potential(position)
scale, translation, transformed = self.momentum_fn([position, grad, t])
scale *= -.5 * self.eps
transformed *= self.eps
momentum = (
tf.exp(scale) * (momentum + .5 * self.eps *
(tf.exp(transformed) * grad - translation)))
return momentum, tf.reduce_sum(scale, axis=1)
def _update_position_backward(self, position, momentum, t, mask, mask_inv):
"""Update x in the backward leapfrog step. Inverting the forward update."""
scale, translation, transformed = self.position_fn(
[momentum, mask * position, t])
scale *= -self.eps
transformed *= self.eps
position = (
mask * position + mask_inv * tf.exp(scale) *
(position - self.eps * (tf.exp(transformed) * momentum + translation)))
return position, tf.reduce_sum(mask_inv * scale, axis=1)
def _compute_accept_prob(self, position, momentum, position_post,
momentum_post, sumlogdet):
"""Compute the prob of accepting the proposed state given old state."""
old_hamil = self.hamiltonian(position, momentum)
new_hamil = self.hamiltonian(position_post, momentum_post)
prob = tf.exp(tf.minimum(old_hamil - new_hamil + sumlogdet, 0.))
# Ensure numerical stability as well as correct gradients
return tf.where(tf.is_finite(prob), prob, tf.zeros_like(prob))
def _construct_time(self):
"""Convert leapfrog step index into sinusoidal time."""
self.ts = []
for i in range(self.n_steps):
t = tf.constant(
[
np.cos(2 * np.pi * i / self.n_steps),
np.sin(2 * np.pi * i / self.n_steps)
],
dtype=tf.float32)
self.ts.append(t[None, :])
def _get_time(self, i):
"""Get sinusoidal time for i-th augmented leapfrog step."""
return self.ts[i]
def _construct_masks(self):
"""Construct different binary masks for different time steps."""
self.masks = []
for _ in range(self.n_steps):
# Need to use npr here because tf would generated different random
# values across different `sess.run`
idx = npr.permutation(np.arange(self.x_dim))[:self.x_dim // 2]
mask = np.zeros((self.x_dim,))
mask[idx] = 1.
mask = tf.constant(mask, dtype=tf.float32)
self.masks.append(mask[None, :])
def _get_mask(self, i):
"""Get binary masks for i-th augmented leapfrog step."""
m = self.masks[i]
return m, 1. - m
def kinetic(self, v):
"""Compute the kinetic energy."""
return .5 * tf.reduce_sum(v**2, axis=1)
def hamiltonian(self, position, momentum):
"""Compute the overall Hamiltonian."""
return self.potential(position) + self.kinetic(momentum)
def grad_potential(self, position, check_numerics=True):
"""Get gradient of potential function at current location."""
if tf.executing_eagerly():
grad = tfe.gradients_function(self.potential)(position)[0]
else:
grad = tf.gradients(self.potential(position), position)[0]
return grad
# Examples of unnormalized log densities
def get_scg_energy_fn():
"""Get energy function for 2d strongly correlated Gaussian."""
# Avoid recreating tf constants on each invocation of gradients
mu = tf.constant([0., 0.])
sigma = tf.constant([[50.05, -49.95], [-49.95, 50.05]])
sigma_inv = tf.matrix_inverse(sigma)
def energy(x):
"""Unnormalized minus log density of 2d strongly correlated Gaussian."""
xmmu = x - mu
return .5 * tf.diag_part(
tf.matmul(tf.matmul(xmmu, sigma_inv), tf.transpose(xmmu)))
return energy, mu, sigma
def get_rw_energy_fn():
"""Get energy function for rough well distribution."""
# For small eta, the density underlying the rough-well energy is very close to
# a unit Gaussian; however, the gradient is greatly affected by the small
# cosine perturbations
eta = 1e-2
mu = tf.constant([0., 0.])
sigma = tf.constant([[1., 0.], [0., 1.]])
def energy(x):
ip = tf.reduce_sum(x**2., axis=1)
return .5 * ip + eta * tf.reduce_sum(tf.cos(x / eta), axis=1)
return energy, mu, sigma
# Loss function
def compute_loss(dynamics, x, scale=.1, eps=1e-4):
"""Compute loss defined in equation (8)."""
z = tf.random_normal(tf.shape(x)) # Auxiliary variable
x_, _, x_accept_prob, x_out = dynamics.apply_transition(x)
z_, _, z_accept_prob, _ = dynamics.apply_transition(z)
# Add eps for numerical stability; following released impl
x_loss = tf.reduce_sum((x - x_)**2, axis=1) * x_accept_prob + eps
z_loss = tf.reduce_sum((z - z_)**2, axis=1) * z_accept_prob + eps
loss = tf.reduce_mean(
(1. / x_loss + 1. / z_loss) * scale - (x_loss + z_loss) / scale, axis=0)
return loss, x_out, x_accept_prob
def loss_and_grads(dynamics, x, loss_fn=compute_loss):
"""Obtain loss value and gradients."""
with tf.GradientTape() as tape:
loss_val, out, accept_prob = loss_fn(dynamics, x)
grads = tape.gradient(loss_val, dynamics.trainable_variables)
return loss_val, grads, out, accept_prob
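# A minimal usage sketch (assumes eager execution and the 2-D energies defined above;
# the names below are illustrative only):
# energy_fn, _, _ = get_scg_energy_fn()
# dynamics = Dynamics(x_dim=2, minus_loglikelihood_fn=energy_fn, n_steps=10, eps=.1)
# samples = tf.random_normal(shape=[8, 2])
# loss, grads, samples, accept_prob = loss_and_grads(dynamics, samples)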
| apache-2.0 | 5,763,348,305,343,312,000 | 33.954416 | 80 | 0.641128 | false |
ymero/tornado | tornado/__init__.py | 75 | 1130 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Tornado web server and tools."""
from __future__ import absolute_import, division, print_function, with_statement
# version is a human-readable version number.
# version_info is a four-tuple for programmatic comparison. The first
# three numbers are the components of the version number. The fourth
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
version = "4.3.dev1"
version_info = (4, 3, 0, -100)
| apache-2.0 | -443,777,591,858,728,260 | 37.965517 | 80 | 0.755752 | false |
sureshsundriyal/pysqlitebkup | pysqlitebkup.py | 1 | 4083 | #! /usr/bin/env python
import ctypes
from ctypes.util import find_library
__author__ = 'Suresh Sundriyal'
__license__ = 'CC0 - No rights reserved.'
__version__ = '0.0.1'
__credits__ = [ 'Joongi Kim: https://gist.github.com/achimnol/3021995',
'sqlite3.org: http://www.sqlite.org/backup.html' ]
SQLITE_OK = 0
SQLITE_ERROR = 1
SQLITE_BUSY = 5
SQLITE_LOCKED = 6
SQLITE_DONE = 101
SQLITE_OPEN_READONLY = 1
SQLITE_OPEN_READWRITE = 2
SQLITE_OPEN_CREATE = 4
sqlite = ctypes.CDLL(find_library('sqlite3'))
sqlite.sqlite3_backup_init.restype = ctypes.c_void_p
class BackupInitError(Exception):
pass
class BackupFailedError(Exception):
pass
class FileOpenError(Exception):
pass
class UninitializedError(Exception):
pass
def _openFile(fileAttributes):
fileName, ptr, mode = fileAttributes
fileName_p = ctypes.c_char_p(fileName.encode('utf-8'))
rc = sqlite.sqlite3_open_v2(fileName_p, ctypes.byref(ptr),
mode, None)
if (rc != SQLITE_OK or ptr.value is None):
raise FileOpenError("Unable to open file(%s), rc(%s)" % (
fileName, rc))
class dbbackup(object):
def __init__(self, src, dst):
self.src = src
self.dst = dst
self.p_src_db = ctypes.c_void_p(None)
self.p_dst_db = ctypes.c_void_p(None)
self.p_backup = ctypes.c_void_p(None)
self.finished = False
self.remaining = None
self.pagecount = None
def __enter__(self):
self.backupInit()
return self
def __exit__(self, type, value, traceback):
self.backupFinish()
def backupInit(self):
# We do this for the side-effect of opening both the files and not
# having boilerplate code and the fact that map is generally faster
# than a for loop.
list(map(_openFile,
[(self.src, self.p_src_db, SQLITE_OPEN_READONLY),
(self.dst, self.p_dst_db,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE)]))
dbType = 'main'.encode('utf-8')
self.p_backup = ctypes.c_void_p(sqlite.sqlite3_backup_init(
self.p_dst_db, dbType,
self.p_src_db, dbType))
if self.p_backup.value is None:
raise BackupInitError("Failed to backup_init")
def backupFinish(self):
if self.p_backup.value is not None:
sqlite.sqlite3_backup_finish(self.p_backup)
rc = sqlite.sqlite3_errcode(self.p_dst_db)
if self.p_dst_db.value is not None:
sqlite.sqlite3_close(self.p_dst_db)
if self.p_src_db.value is not None:
sqlite.sqlite3_close(self.p_src_db)
if rc != SQLITE_OK:
raise BackupFailedError("Failed to backup db: rc(%s)" % rc)
def step(self, size=5):
if self.p_backup.value is None:
raise UninitializedError(
"step called without calling backupInit first")
rc = sqlite.sqlite3_backup_step(self.p_backup, size)
self.remaining = sqlite.sqlite3_backup_remaining(self.p_backup)
self.pagecount = sqlite.sqlite3_backup_pagecount(self.p_backup)
if rc == SQLITE_DONE:
self.finished = True
if rc in (SQLITE_OK, SQLITE_BUSY, SQLITE_LOCKED):
# sleep for 250 ms before continuing.
sqlite.sqlite3_sleep(250)
def backup(self, stepSize=5):
import os
__unlink = True
if os.path.exists(self.dst):
__unlink = False
try:
while not self.finished:
self.step(stepSize)
except:
if __unlink:
try:
os.unlink(self.dst)
except OSError as e:
pass
raise
if __name__ == '__main__':
import sys
import logging
try:
with dbbackup(sys.argv[1], sys.argv[2]) as p:
p.backup(20)
except:
logging.exception("Failed to backup sqlite db")
| cc0-1.0 | 1,257,139,710,233,133,300 | 29.244444 | 75 | 0.572128 | false |
PeterFaiman/ruby-grpc-minimal | src/python/grpcio_tests/tests/testing/_server_application.py | 12 | 2824 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example gRPC Python-using server-side application."""
import grpc
# requests_pb2 is a semantic dependency of this module.
from tests.testing import _application_common
from tests.testing.proto import requests_pb2 # pylint: disable=unused-import
from tests.testing.proto import services_pb2
from tests.testing.proto import services_pb2_grpc
class FirstServiceServicer(services_pb2_grpc.FirstServiceServicer):
"""Services RPCs."""
def UnUn(self, request, context):
if _application_common.UNARY_UNARY_REQUEST == request:
return _application_common.UNARY_UNARY_RESPONSE
else:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Something is wrong with your request!')
return services_pb2.Down()
def UnStre(self, request, context):
if _application_common.UNARY_STREAM_REQUEST != request:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Something is wrong with your request!')
return
yield services_pb2.Strange()
def StreUn(self, request_iterator, context):
context.send_initial_metadata((
('server_application_metadata_key', 'Hi there!',),))
for request in request_iterator:
if request != _application_common.STREAM_UNARY_REQUEST:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Something is wrong with your request!')
return services_pb2.Strange()
elif not context.is_active():
return services_pb2.Strange()
else:
return _application_common.STREAM_UNARY_RESPONSE
def StreStre(self, request_iterator, context):
for request in request_iterator:
if request != _application_common.STREAM_STREAM_REQUEST:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details('Something is wrong with your request!')
return
elif not context.is_active():
return
else:
yield _application_common.STREAM_STREAM_RESPONSE
yield _application_common.STREAM_STREAM_RESPONSE
| apache-2.0 | -6,401,218,065,028,231,000 | 41.787879 | 77 | 0.674575 | false |
PantherHackers/PantherBot | scripts/rage.py | 1 | 1230 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import upsidedown
import sys
from response import Response
from pb_logging import PBLogger
logger = PBLogger("Rage")
# flips text using the upsidedown module and adds a donger for emphasis
def run(response, args=[]):
response_obj = Response(sys.modules[__name__])
toFlip = ''
donger = '(ノಠ益ಠ)ノ彡'
for n in range(0, len(args)):
toFlip += args[n] + " "
if toFlip == '':
toFlip = unicode('┻━┻', "utf-8")
try:
donger = unicode(donger, "utf-8")
logger.info(toFlip[:15 or len(toFlip)] + "...")
flippedmsg = upsidedown.transform(toFlip)
response_obj.messages_to_send.append(donger + flippedmsg)
except Exception as e:
logger.error("Error in flip: " + str(e))
response_obj.messages_to_send.append("Sorry, I can't seem to flip right now, or you gave an invalid argument")
return response_obj
def return_alias():
alias_list = ["rage"]
return alias_list
def is_admin_command():
return False
def help_preview():
return "!rage <Optional:String>"
def help_text():
return "Rage flips the text or table because you really want the world to know that you're upset." | mpl-2.0 | -8,448,291,754,435,529,000 | 27.880952 | 118 | 0.641914 | false |
andreabedini/PyTables | tables/exceptions.py | 5 | 11309 | # -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: December 17, 2004
# Author: Francesc Alted - [email protected]
#
# $Id$
#
########################################################################
"""Declare exceptions and warnings that are specific to PyTables."""
from __future__ import absolute_import
import six
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
import os
import warnings
import traceback
class HDF5ExtError(RuntimeError):
"""A low level HDF5 operation failed.
This exception is raised the low level PyTables components used for
accessing HDF5 files. It usually signals that something is not
going well in the HDF5 library or even at the Input/Output level.
Errors in the HDF5 C library may be accompanied by an extensive
HDF5 back trace on standard error (see also
:func:`tables.silence_hdf5_messages`).
.. versionchanged:: 2.4
Parameters
----------
message
error message
h5bt
This parameter (keyword only) controls the HDF5 back trace
handling. Any keyword arguments other than h5bt is ignored.
* if set to False the HDF5 back trace is ignored and the
:attr:`HDF5ExtError.h5backtrace` attribute is set to None
* if set to True the back trace is retrieved from the HDF5
library and stored in the :attr:`HDF5ExtError.h5backtrace`
attribute as a list of tuples
* if set to "VERBOSE" (default) the HDF5 back trace is
stored in the :attr:`HDF5ExtError.h5backtrace` attribute
and also included in the string representation of the
exception
* if not set (or set to None) the default policy is used
(see :attr:`HDF5ExtError.DEFAULT_H5_BACKTRACE_POLICY`)
"""
# NOTE: in order to avoid circular dependencies between modules the
# _dump_h5_backtrace method is set at initialization time in
# the utilsExtenion.
_dump_h5_backtrace = None
DEFAULT_H5_BACKTRACE_POLICY = "VERBOSE"
"""Default policy for HDF5 backtrace handling
* if set to False the HDF5 back trace is ignored and the
:attr:`HDF5ExtError.h5backtrace` attribute is set to None
* if set to True the back trace is retrieved from the HDF5
library and stored in the :attr:`HDF5ExtError.h5backtrace`
attribute as a list of tuples
* if set to "VERBOSE" (default) the HDF5 back trace is
stored in the :attr:`HDF5ExtError.h5backtrace` attribute
and also included in the string representation of the
exception
This parameter can be set using the
:envvar:`PT_DEFAULT_H5_BACKTRACE_POLICY` environment variable.
Allowed values are "IGNORE" (or "FALSE"), "SAVE" (or "TRUE") and
"VERBOSE" to set the policy to False, True and "VERBOSE"
respectively. The special value "DEFAULT" can be used to reset
the policy to the default value
.. versionadded:: 2.4
"""
@classmethod
def set_policy_from_env(cls):
envmap = {
"IGNORE": False,
"FALSE": False,
"SAVE": True,
"TRUE": True,
"VERBOSE": "VERBOSE",
"DEFAULT": "VERBOSE",
}
oldvalue = cls.DEFAULT_H5_BACKTRACE_POLICY
envvalue = os.environ.get("PT_DEFAULT_H5_BACKTRACE_POLICY", "DEFAULT")
try:
newvalue = envmap[envvalue.upper()]
except KeyError:
warnings.warn("Invalid value for the environment variable "
"'PT_DEFAULT_H5_BACKTRACE_POLICY'. The default "
"policy for HDF5 back trace management in PyTables "
"will be: '%s'" % oldvalue)
else:
cls.DEFAULT_H5_BACKTRACE_POLICY = newvalue
return oldvalue
def __init__(self, *args, **kargs):
super(HDF5ExtError, self).__init__(*args)
self._h5bt_policy = kargs.get('h5bt', self.DEFAULT_H5_BACKTRACE_POLICY)
if self._h5bt_policy and self._dump_h5_backtrace is not None:
self.h5backtrace = self._dump_h5_backtrace()
"""HDF5 back trace.
Contains the HDF5 back trace as a (possibly empty) list of
tuples. Each tuple has the following format::
(filename, line number, function name, text)
Depending on the value of the *h5bt* parameter passed to the
initializer the h5backtrace attribute can be set to None.
This means that the HDF5 back trace has been simply ignored
(not retrieved from the HDF5 C library error stack) or that
there has been an error (silently ignored) during the HDF5 back
trace retrieval.
.. versionadded:: 2.4
See Also
--------
traceback.format_list : :func:`traceback.format_list`
"""
# XXX: check _dump_h5_backtrace failures
else:
self.h5backtrace = None
def __str__(self):
"""Returns a sting representation of the exception.
The actual result depends on policy set in the initializer
:meth:`HDF5ExtError.__init__`.
.. versionadded:: 2.4
"""
verbose = bool(self._h5bt_policy in ('VERBOSE', 'verbose'))
if verbose and self.h5backtrace:
bt = "\n".join([
"HDF5 error back trace\n",
self.format_h5_backtrace(),
"End of HDF5 error back trace"
])
if len(self.args) == 1 and isinstance(self.args[0], six.string_types):
msg = super(HDF5ExtError, self).__str__()
msg = "%s\n\n%s" % (bt, msg)
elif self.h5backtrace[-1][-1]:
msg = "%s\n\n%s" % (bt, self.h5backtrace[-1][-1])
else:
msg = bt
else:
msg = super(HDF5ExtError, self).__str__()
return msg
def format_h5_backtrace(self, backtrace=None):
"""Convert the HDF5 trace back represented as a list of tuples.
(see :attr:`HDF5ExtError.h5backtrace`) into a string.
.. versionadded:: 2.4
"""
if backtrace is None:
backtrace = self.h5backtrace
if backtrace is None:
return 'No HDF5 back trace available'
else:
return ''.join(traceback.format_list(backtrace))
# Initialize the policy for HDF5 back trace handling
HDF5ExtError.set_policy_from_env()
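# For example, a user who wants the HDF5 back trace captured but kept out of the
# exception message could run (hypothetical shell usage):
# $ PT_DEFAULT_H5_BACKTRACE_POLICY=SAVE python my_script.py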
# The following exceptions are concretions of the ``ValueError`` exceptions
# raised by ``file`` objects on certain operations.
class ClosedNodeError(ValueError):
"""The operation can not be completed because the node is closed.
For instance, listing the children of a closed group is not allowed.
"""
pass
class ClosedFileError(ValueError):
"""The operation can not be completed because the hosting file is closed.
For instance, getting an existing node from a closed file is not
allowed.
"""
pass
class FileModeError(ValueError):
"""The operation can not be carried out because the mode in which the
hosting file is opened is not adequate.
For instance, removing an existing leaf from a read-only file is not
allowed.
"""
pass
class NodeError(AttributeError, LookupError):
"""Invalid hierarchy manipulation operation requested.
This exception is raised when the user requests an operation on the
hierarchy which can not be run because of the current layout of the
tree. This includes accessing nonexistent nodes, moving or copying
or creating over an existing node, non-recursively removing groups
with children, and other similarly invalid operations.
A node in a PyTables database cannot be simply overwritten by
    replacing it. Instead, the old node must be removed explicitly
    before another one can take its place. This is done to protect
    interactive users from inadvertently deleting whole trees of data by
a single erroneous command.
"""
pass
class NoSuchNodeError(NodeError):
"""An operation was requested on a node that does not exist.
This exception is raised when an operation gets a path name or a
``(where, name)`` pair leading to a nonexistent node.
"""
pass
class UndoRedoError(Exception):
"""Problems with doing/redoing actions with Undo/Redo feature.
This exception indicates a problem related to the Undo/Redo
mechanism, such as trying to undo or redo actions with this
mechanism disabled, or going to a nonexistent mark.
"""
pass
class UndoRedoWarning(Warning):
"""Issued when an action not supporting Undo/Redo is run.
This warning is only shown when the Undo/Redo mechanism is enabled.
"""
pass
class NaturalNameWarning(Warning):
"""Issued when a non-pythonic name is given for a node.
This is not an error and may even be very useful in certain
contexts, but one should be aware that such nodes cannot be
accessed using natural naming (instead, ``getattr()`` must be
used explicitly).
"""
pass
class PerformanceWarning(Warning):
"""Warning for operations which may cause a performance drop.
This warning is issued when an operation is made on the database
which may cause it to slow down on future operations (i.e. making
the node tree grow too much).
"""
pass
class FlavorError(ValueError):
"""Unsupported or unavailable flavor or flavor conversion.
This exception is raised when an unsupported or unavailable flavor
is given to a dataset, or when a conversion of data between two
given flavors is not supported nor available.
"""
pass
class FlavorWarning(Warning):
"""Unsupported or unavailable flavor conversion.
This warning is issued when a conversion of data between two given
flavors is not supported nor available, and raising an error would
render the data inaccessible (e.g. on a dataset of an unavailable
flavor in a read-only file).
See the `FlavorError` class for more information.
"""
pass
class FiltersWarning(Warning):
"""Unavailable filters.
This warning is issued when a valid filter is specified but it is
not available in the system. It may mean that an available default
filter is to be used instead.
"""
pass
class OldIndexWarning(Warning):
"""Unsupported index format.
This warning is issued when an index in an unsupported format is
found. The index will be marked as invalid and will behave as if
doesn't exist.
"""
pass
class DataTypeWarning(Warning):
"""Unsupported data type.
This warning is issued when an unsupported HDF5 data type is found
(normally in a file created with other tool than PyTables).
"""
pass
class ExperimentalFeatureWarning(Warning):
"""Generic warning for experimental features.
This warning is issued when using a functionality that is still
experimental and that users have to use with care.
"""
pass
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
| bsd-3-clause | 6,151,312,810,610,433,000 | 28.071979 | 82 | 0.643824 | false |
hlin117/statsmodels | examples/python/regression_plots.py | 33 | 9585 |
## Regression Plots
from __future__ import print_function
from statsmodels.compat import lzip
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.formula.api import ols
### Duncan's Prestige Dataset
#### Load the Data
# We can use a utility function to load any R dataset available from the great <a href="http://vincentarelbundock.github.com/Rdatasets/">Rdatasets package</a>.
prestige = sm.datasets.get_rdataset("Duncan", "car", cache=True).data
prestige.head()
prestige_model = ols("prestige ~ income + education", data=prestige).fit()
print(prestige_model.summary())
#### Influence plots
# Influence plots show the (externally) studentized residuals vs. the leverage of each observation as measured by the hat matrix.
#
# Externally studentized residuals are residuals that are scaled by their standard deviation where
#
# $$var(\hat{\epsilon}_i)=\hat{\sigma}^2_i(1-h_{ii})$$
#
# with
#
# $$\hat{\sigma}^2_i=\frac{1}{n - p - 1}\sum_{j=1,\; j \neq i}^{n}\hat{\epsilon}_j^2$$
#
# $n$ is the number of observations and $p$ is the number of regressors. $h_{ii}$ is the $i$-th diagonal element of the hat matrix
#
# $$H=X(X^{\;\prime}X)^{-1}X^{\;\prime}$$
#
# The influence of each point can be visualized by the criterion keyword argument. Options are Cook's distance and DFFITS, two measures of influence.
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.influence_plot(prestige_model, ax=ax, criterion="cooks")
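# The quantities behind the influence plot can also be inspected numerically, which
# makes the observations below concrete. This is a sketch that assumes the
# OLSInfluence accessors exposed by results.get_influence() (hat_matrix_diag,
# resid_studentized_external and cooks_distance).
influence = prestige_model.get_influence()
influence_frame = pd.DataFrame({"leverage": influence.hat_matrix_diag,
                                "student_resid": influence.resid_studentized_external,
                                "cooks_d": influence.cooks_distance[0]},
                               index=prestige.index)
print(influence_frame.head())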
# As you can see there are a few worrisome observations. Both contractor and reporter have low leverage but a large residual. <br />
# RR.engineer has small residual and large leverage. Conductor and minister have both high leverage and large residuals, and, <br />
# therefore, large influence.
#### Partial Regression Plots
# Since we are doing multivariate regressions, we cannot just look at individual bivariate plots to discern relationships. <br />
# Instead, we want to look at the relationship of the dependent variable and independent variables conditional on the other <br />
# independent variables. We can do this by using partial regression plots, otherwise known as added variable plots. <br />
#
# In a partial regression plot, to discern the relationship between the response variable and the $k$-th variable, we compute <br />
# the residuals by regressing the response variable versus the independent variables excluding $X_k$. We can denote this by <br />
# $X_{\sim k}$. We then compute the residuals by regressing $X_k$ on $X_{\sim k}$. The partial regression plot is the plot <br />
# of the former versus the latter residuals. <br />
#
# The notable points of this plot are that the fitted line has slope $\beta_k$ and intercept zero. The residuals of this plot <br />
# are the same as those of the least squares fit of the original model with full $X$. You can discern the effects of the <br />
# individual data values on the estimation of a coefficient easily. If obs_labels is True, then these points are annotated <br />
# with their observation label. You can also see the violation of underlying assumptions such as homoskedasticity and <br />
# linearity.
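# Before calling the convenience function, here is a hand-rolled sketch of the
# construction just described, for the income coefficient (illustrative only; the
# plot_partregress call below performs these steps internally):
y_partial = ols("prestige ~ education", data=prestige).fit().resid
x_partial = ols("income ~ education", data=prestige).fit().resid
print(sm.OLS(y_partial, x_partial).fit().params)  # slope matches the full-model income coefficient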
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("prestige", "income", ["income", "education"], data=prestige, ax=ax)
ax = fig.axes[0]
ax.set_xlim(-2e-15, 1e-14)
ax.set_ylim(-25, 30);
fig, ax = plt.subplots(figsize=(12,14))
fig = sm.graphics.plot_partregress("prestige", "income", ["education"], data=prestige, ax=ax)
# As you can see the partial regression plot confirms the influence of conductor, minister, and RR.engineer on the partial relationship between income and prestige. The cases greatly decrease the effect of income on prestige. Dropping these cases confirms this.
subset = ~prestige.index.isin(["conductor", "RR.engineer", "minister"])
prestige_model2 = ols("prestige ~ income + education", data=prestige, subset=subset).fit()
print(prestige_model2.summary())
# For a quick check of all the regressors, you can use plot_partregress_grid. These plots will not label the <br />
# points, but you can use them to identify problems and then use plot_partregress to get more information.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(prestige_model, fig=fig)
#### Component-Component plus Residual (CCPR) Plots
# The CCPR plot provides a way to judge the effect of one regressor on the <br />
# response variable by taking into account the effects of the other <br />
# independent variables. The partial residuals plot is defined as <br />
# $\text{Residuals} + B_iX_i \text{ }\text{ }$ versus $X_i$. The component adds $B_iX_i$ versus <br />
# $X_i$ to show where the fitted line would lie. Care should be taken if $X_i$ <br />
# is highly correlated with any of the other independent variables. If this <br />
# is the case, the variance evident in the plot will be an underestimate of <br />
# the true variance.
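# A hand-rolled version of these partial residuals (added for illustration; the plot_ccpr call
# below is the original example): residuals plus the education component, against education.
educ_component = prestige_model.params["education"] * prestige.education
partial_resid = prestige_model.resid + educ_component
fig, ax = plt.subplots(figsize=(12, 8))
ax.plot(prestige.education, partial_resid, 'o')
ax.set_xlabel("education")
ax.set_ylabel("residuals + b_education * education")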
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_ccpr(prestige_model, "education", ax=ax)
# As you can see the relationship between the variation in prestige explained by education conditional on income seems to be linear, though you can see there are some observations that are exerting considerable influence on the relationship. We can quickly look at more than one variable by using plot_ccpr_grid.
fig = plt.figure(figsize=(12, 8))
fig = sm.graphics.plot_ccpr_grid(prestige_model, fig=fig)
#### Regression Plots
# The plot_regress_exog function is a convenience function that gives a 2x2 plot containing the dependent variable and fitted values with confidence intervals vs. the independent variable chosen, the residuals of the model vs. the chosen independent variable, a partial regression plot, and a CCPR plot. This function can be used for quickly checking modeling assumptions with respect to a single regressor.
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_regress_exog(prestige_model, "education", fig=fig)
#### Fit Plot
# The plot_fit function plots the fitted values versus a chosen independent variable. It includes prediction confidence intervals and optionally plots the true dependent variable.
fig, ax = plt.subplots(figsize=(12, 8))
fig = sm.graphics.plot_fit(prestige_model, "education", ax=ax)
### Statewide Crime 2009 Dataset
# Compare the following to http://www.ats.ucla.edu/stat/stata/webbooks/reg/chapter4/statareg_self_assessment_answers4.htm
#
# Note that the data here are not the same as in that example. You could run that example by uncommenting the necessary cells below.
#dta = pd.read_csv("http://www.stat.ufl.edu/~aa/social/csv_files/statewide-crime-2.csv")
#dta = dta.set_index("State").dropna()
#dta.rename(columns={"VR" : "crime",
# "MR" : "murder",
# "M" : "pctmetro",
# "W" : "pctwhite",
# "H" : "pcths",
# "P" : "poverty",
# "S" : "single"
# }, inplace=True)
#
#crime_model = ols("murder ~ pctmetro + poverty + pcths + single", data=dta).fit()
dta = sm.datasets.statecrime.load_pandas().data
crime_model = ols("murder ~ urban + poverty + hs_grad + single", data=dta).fit()
print(crime_model.summary())
#### Partial Regression Plots
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(crime_model, fig=fig)
fig, ax = plt.subplots(figsize=(12,8))
fig = sm.graphics.plot_partregress("murder", "hs_grad", ["urban", "poverty", "single"], ax=ax, data=dta)
#### Leverage-Resid<sup>2</sup> Plot
# Closely related to the influence_plot is the leverage-resid<sup>2</sup> plot.
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.plot_leverage_resid2(crime_model, ax=ax)
#### Influence Plot
fig, ax = plt.subplots(figsize=(8,6))
fig = sm.graphics.influence_plot(crime_model, ax=ax)
#### Using robust regression to correct for outliers.
# Part of the problem here in recreating the Stata results is that M-estimators are not robust to leverage points. MM-estimators should do better with this example.
from statsmodels.formula.api import rlm
rob_crime_model = rlm("murder ~ urban + poverty + hs_grad + single", data=dta,
M=sm.robust.norms.TukeyBiweight(3)).fit(conv="weights")
print(rob_crime_model.summary())
#rob_crime_model = rlm("murder ~ pctmetro + poverty + pcths + single", data=dta, M=sm.robust.norms.TukeyBiweight()).fit(conv="weights")
#print(rob_crime_model.summary())
# Influence diagnostics are not yet available as part of RLM, but we can recreate them. (This depends on the status of [issue #888](https://github.com/statsmodels/statsmodels/issues/808))
weights = rob_crime_model.weights
idx = weights > 0
X = rob_crime_model.model.exog[idx]
ww = weights[idx] / weights[idx].mean()
hat_matrix_diag = ww*(X*np.linalg.pinv(X).T).sum(1)
resid = rob_crime_model.resid
resid2 = resid**2
resid2 /= resid2.sum()
nobs = int(idx.sum())
hm = hat_matrix_diag.mean()
rm = resid2.mean()
from statsmodels.graphics import utils
fig, ax = plt.subplots(figsize=(12,8))
ax.plot(resid2[idx], hat_matrix_diag, 'o')
ax = utils.annotate_axes(range(nobs), labels=rob_crime_model.model.data.row_labels[idx],
points=lzip(resid2[idx], hat_matrix_diag), offset_points=[(-5,5)]*nobs,
size="large", ax=ax)
ax.set_xlabel("resid2")
ax.set_ylabel("leverage")
ylim = ax.get_ylim()
ax.vlines(rm, *ylim)
xlim = ax.get_xlim()
ax.hlines(hm, *xlim)
ax.margins(0,0)
| bsd-3-clause | -5,553,750,183,334,910,000 | 40.314655 | 407 | 0.71445 | false |
Jmainguy/ansible-modules-core | utilities/logic/include_vars.py | 28 | 1982 | # -*- mode: python -*-
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
author: "Benno Joy (@bennojoy)"
module: include_vars
short_description: Load variables from files, dynamically within a task.
description:
- Loads variables from a YAML/JSON file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from.
options:
file:
version_added: "2.2"
description:
- The file name from which variables should be loaded.
- If the path is relative, it will look for the file in vars/ subdirectory of a role or relative to playbook.
name:
version_added: "2.2"
description:
        - The name of a variable into which to assign the included vars; if omitted (null), they will be made top level vars.
default: null
free-form:
description:
- This module allows you to specify the 'file' option directly w/o any other options.
notes:
- The file is always required either as the explicit option or using the free-form.
version_added: "1.4"
'''
EXAMPLES = """
# Include vars of stuff.yml into the 'stuff' variable (2.2).
- include_vars:
file: stuff.yml
name: stuff
# Conditionally load variables into 'plans' when x is 0, otherwise do not. (2.2)
- include_vars: file=contingency_plan.yml name=plans
when: x == 0
# Load a variable file based on the OS type, or a default if not found.
- include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_distribution }}.yml"
- "{{ ansible_os_family }}.yml"
- "default.yml"
# bare include (free-form)
- include_vars: myvars.yml
"""
| gpl-3.0 | 8,759,559,806,021,761,000 | 33.77193 | 181 | 0.702321 | false |
alekz112/statsmodels | statsmodels/tsa/statespace/model.py | 6 | 3282 | """
State Space Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from .representation import Representation
from .kalman_filter import KalmanFilter
import statsmodels.tsa.base.tsa_model as tsbase
class Model(KalmanFilter, Representation, tsbase.TimeSeriesModel):
"""
State space representation of a time series process, with Kalman filter and
Statsmodels integration.
This intermediate class joins the state space representation and filtering
classes with the Statsmodels `TimeSeriesModel`.
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
k_states : int
The dimension of the unobserved state process.
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k. Default is no
exogenous regressors.
dates : array-like of datetime, optional
An array-like object of datetime objects. If a Pandas object is given
for endog, it is assumed to have a DateIndex.
freq : str, optional
The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',
'M', 'A', or 'Q'. This is optional if dates are given.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices, for Kalman filtering options, for Kalman smoothing
options, or for Simulation smoothing options.
See `Representation`, `KalmanFilter`, and `KalmanSmoother` for more
details.
See Also
--------
statsmodels.tsa.statespace.tsa.base.tsa_model.TimeSeriesModel
statsmodels.tsa.statespace.mlemodel.MLEModel
statsmodels.tsa.statespace.kalman_filter.KalmanFilter
statsmodels.tsa.statespace.representation.Representation
"""
def __init__(self, endog, k_states, exog=None, dates=None, freq=None,
**kwargs):
# Initialize the model base
tsbase.TimeSeriesModel.__init__(self, endog=endog, exog=exog,
dates=dates, freq=freq, missing='none')
# Need to modify the endog variable
endog = self.endog
# Base class may allow 1-dim data, whereas we need 2-dim
if endog.ndim == 1:
endog.shape = (endog.shape[0], 1) # this will be C-contiguous
# Base classes data may be either C-ordered or F-ordered - we want it
# to be C-ordered since it will also be in shape (nobs, k_endog), and
# then we can just transpose it.
if not endog.flags['C_CONTIGUOUS']:
# TODO this breaks the reference link between the model endog
# variable and the original object - do we need a warn('')?
# This will happen often with Pandas DataFrames, which are often
# Fortran-ordered and in the long format
endog = np.ascontiguousarray(endog)
# Now endog is C-ordered and in long format (nobs x k_endog). To get
# F-ordered and in wide format just need to transpose.
endog = endog.T
# Initialize the statespace representation
super(Model, self).__init__(endog.shape[0], k_states, **kwargs)
# Bind the data to the model
self.bind(endog)
| bsd-3-clause | -7,109,958,000,179,251,000 | 38.542169 | 79 | 0.659659 | false |
mindw/numpy | numpy/distutils/command/build_scripts.py | 264 | 1731 | """ Modified version of build_scripts that handles building scripts from functions.
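A script entry may also be a callable that takes the build directory and
returns the path of a generated script (or ``None`` to skip it), as handled
by ``generate_scripts`` below.  An illustrative sketch (not an actual
numpy.distutils helper):
    def make_foo_script(build_dir):
        import os
        path = os.path.join(build_dir, 'foo')
        with open(path, 'w') as f:
            f.write('#!/usr/bin/env python')
        return path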
"""
from __future__ import division, absolute_import, print_function
from distutils.command.build_scripts import build_scripts as old_build_scripts
from numpy.distutils import log
from numpy.distutils.misc_util import is_string
class build_scripts(old_build_scripts):
def generate_scripts(self, scripts):
new_scripts = []
func_scripts = []
for script in scripts:
if is_string(script):
new_scripts.append(script)
else:
func_scripts.append(script)
if not func_scripts:
return new_scripts
build_dir = self.build_dir
self.mkpath(build_dir)
for func in func_scripts:
script = func(build_dir)
if not script:
continue
if is_string(script):
log.info(" adding '%s' to scripts" % (script,))
new_scripts.append(script)
else:
[log.info(" adding '%s' to scripts" % (s,)) for s in script]
new_scripts.extend(list(script))
return new_scripts
    def run(self):
if not self.scripts:
return
self.scripts = self.generate_scripts(self.scripts)
# Now make sure that the distribution object has this list of scripts.
# setuptools' develop command requires that this be a list of filenames,
# not functions.
self.distribution.scripts = self.scripts
return old_build_scripts.run(self)
def get_source_files(self):
from numpy.distutils.misc_util import get_script_files
return get_script_files(self.scripts)
| bsd-3-clause | 7,410,106,554,834,120,000 | 32.941176 | 83 | 0.608319 | false |
benhc123/p2pool | p2pool/test/test_p2p.py | 269 | 2724 | import random
from twisted.internet import defer, endpoints, protocol, reactor
from twisted.trial import unittest
from p2pool import networks, p2p
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_sharereq(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
def handle_share_hashes(self, hashes, peer):
peer.get_shares(
hashes=[hashes[0]],
parents=5,
stops=[],
).chainDeferred(self.df)
df = defer.Deferred()
n = MyNode(df)
n.start()
try:
yield df
finally:
yield n.stop()
@defer.inlineCallbacks
def test_tx_limit(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
self.sent_time = 0
@defer.inlineCallbacks
def got_conn(self, conn):
p2p.Node.got_conn(self, conn)
yield deferral.sleep(.5)
new_mining_txs = dict(self.mining_txs_var.value)
for i in xrange(3):
huge_tx = dict(
version=0,
tx_ins=[],
tx_outs=[dict(
value=0,
script='x'*900000,
)],
lock_time=i,
)
new_mining_txs[bitcoin_data.hash256(bitcoin_data.tx_type.pack(huge_tx))] = huge_tx
self.mining_txs_var.set(new_mining_txs)
self.sent_time = reactor.seconds()
def lost_conn(self, conn, reason):
self.df.callback(None)
try:
p2p.Protocol.max_remembered_txs_size *= 10
df = defer.Deferred()
n = MyNode(df)
n.start()
yield df
if not (n.sent_time <= reactor.seconds() <= n.sent_time + 1):
                raise ValueError('node did not disconnect within 1 second of receiving too much tx data')
yield n.stop()
finally:
p2p.Protocol.max_remembered_txs_size //= 10
| gpl-3.0 | 9,192,467,397,528,439,000 | 33.481013 | 130 | 0.459618 | false |
sengupta/spritzbot | spritzbot/processor.py | 1 | 2789 | import os
import re
import imp
import json
class TweetProcessor:
plugins = {}
commands = {}
base_path = os.path.dirname(os.path.realpath(__file__))
plugin_path = os.path.join(base_path, "plugins")
def __init__(self):
self.load_plugins()
def load_plugins(self):
"""Loads plugins and associated commands."""
# Filename pattern that we want to load.
re_plugin = re.compile('[^.].*\.py$')
for plugin_module in os.listdir(self.plugin_path):
if re_plugin.match(plugin_module):
# Get the module's name
name = plugin_module[:-3]
plugin_info = imp.find_module(name, [self.plugin_path])
plugin = imp.load_module(name, *plugin_info)
self.plugins.update({name:plugin})
for command in plugin.commands():
status_type = command['type']
triggers = command['triggers']
if self.commands.has_key(status_type):
self.commands[status_type].append({'plugin':name,'triggers':triggers})
else:
self.commands[status_type] = [{'plugin':name,'triggers':triggers}]
def process(self, data):
"""Processes the status/tweet and hands over to appropriate plugins."""
try:
status = json.loads(data)
except:
return None
for status_type in self.commands:
            # see if it is of type 'text' or 'friends' or something else
if status.has_key(status_type):
# if it is, find out the modules associated with it
commands = self.commands[status_type]
# for each module that handles say 'text',
for command in commands:
# for triggers that should send data to process
# in that module,
triggers = command['triggers']
for t in triggers:
# compiled regex match:
if t.match(data):
# currently, we're just printing the output
# later there will be facility to reply
# or better - send a tweepy api object to the
# processing module so it can take actions
# independently.
print self.plugins[command['plugin']].process(status)
if __name__ == '__main__':
tp = TweetProcessor()
tweet = r"""{"text":"Chai craving!","id":190207791800135680}"""
friends = r"""{"friends":[123,456,789]}"""
tp.process(friends)
tp.process(tweet)
| bsd-2-clause | 4,119,529,581,569,513,500 | 37.736111 | 94 | 0.512011 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_peer_express_route_circuit_connections_operations.py | 1 | 9352 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PeerExpressRouteCircuitConnectionsOperations:
"""PeerExpressRouteCircuitConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
connection_name: str,
**kwargs
) -> "_models.PeerExpressRouteCircuitConnection":
"""Gets the specified Peer Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the peer express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeerExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.PeerExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PeerExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs
) -> AsyncIterable["_models.PeerExpressRouteCircuitConnectionListResult"]:
"""Gets all global reach peer connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PeerExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.PeerExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PeerExpressRouteCircuitConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections'} # type: ignore
| mit | 3,648,150,955,429,748,700 | 48.744681 | 231 | 0.655368 | false |
aringh/odl | odl/discr/partition.py | 1 | 51284 | # Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Partitons of interval products based on rectilinear grids.
A partition of a set is a finite collection of nonempty, pairwise
disjoint subsets whose union is the original set. The partitions
considered here are based on hypercubes, i.e. the tensor products
of partitions of intervals.
"""
from __future__ import print_function, division, absolute_import
from builtins import object
import numpy as np
from odl.discr.grid import RectGrid, uniform_grid_fromintv
from odl.set import IntervalProd
from odl.util import (
normalized_index_expression, normalized_nodes_on_bdry,
normalized_scalar_param_list, safe_int_conv,
signature_string, indent, array_str, npy_printoptions)
__all__ = ('RectPartition', 'uniform_partition_fromintv',
'uniform_partition_fromgrid', 'uniform_partition',
'nonuniform_partition')
class RectPartition(object):
"""Rectangular partition by hypercubes based on `RectGrid`.
In 1d, a partition of an interval is implicitly defined by a
collection of points x[0], ..., x[N-1] (a grid) which are chosen to
lie in the center of the subintervals. The i-th subinterval is thus
given by
``I[i] = [(x[i-1]+x[i])/2, (x[i]+x[i+1])/2]``
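    Examples
    --------
    As a small illustration of the 1d rule above (values chosen ad hoc for
    this sketch), the points ``0, 1, 3`` in the interval ``[0, 4]`` induce
    the cell boundaries ``0, 0.5, 2, 4``:
    >>> part = odl.RectPartition(odl.IntervalProd(0, 4),
    ...                          odl.RectGrid([0, 1, 3]))
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  2. ,  4. ]),)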
"""
def __init__(self, intv_prod, grid):
"""Initialize a new instance.
Parameters
----------
intv_prod : `IntervalProd`
Set to be partitioned
grid : `RectGrid`
Spatial points supporting the partition. They must be
contained in ``intv_prod``.
"""
super(RectPartition, self).__init__()
if not isinstance(intv_prod, IntervalProd):
raise TypeError('{!r} is not an IntervalProd instance'
''.format(intv_prod))
if not isinstance(grid, RectGrid):
raise TypeError('{!r} is not a RectGrid instance'
''.format(grid))
# More conclusive error than the one from contains_set
if intv_prod.ndim != grid.ndim:
raise ValueError('interval product {} is {}-dimensional while '
'grid {} is {}-dimensional'
''.format(intv_prod, intv_prod.ndim,
grid, grid.ndim))
if not intv_prod.contains_set(grid):
raise ValueError('{} is not contained in {}'
''.format(grid, intv_prod))
self.__set = intv_prod
self.__grid = grid
# Initialize the cell boundaries, the defining property of partitions
bdry_vecs = []
for ax, vec in enumerate(self.grid.coord_vectors):
bdry = np.empty(len(vec) + 1)
bdry[1:-1] = (vec[1:] + vec[:-1]) / 2.0
bdry[0] = self.min()[ax]
bdry[-1] = self.max()[ax]
bdry_vecs.append(bdry)
self.__cell_boundary_vecs = tuple(bdry_vecs)
# Initialize nodes_on_bdry
left_on_bdry = np.isclose(self.grid.min_pt, self.set.min_pt)[:, None]
right_on_bdry = np.isclose(self.grid.max_pt, self.set.max_pt)[:, None]
on_bdry = np.hstack([left_on_bdry, right_on_bdry]).tolist()
self.__nodes_on_bdry = tuple(tuple(r) for r in on_bdry)
@property
def cell_boundary_vecs(self):
"""Return the cell boundaries as coordinate vectors.
Examples
--------
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. ]), array([-1. , -0.5, 1. , 2. ]))
"""
return self.__cell_boundary_vecs
@property
def set(self):
"""Partitioned set, an `IntervalProd`."""
return self.__set
@property
def nodes_on_bdry(self):
"""Encoding of grid points lying on the boundary.
Examples
--------
Using global option (default ``False``):
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3])
>>> part.nodes_on_bdry
False
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=True)
>>> part.nodes_on_bdry
True
``False`` in axis 0, ``True`` in axis 1:
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=[False, True])
>>> part.nodes_on_bdry
(False, True)
In axis 0, ``False`` left and ``True`` right, in axis 1 ``False``:
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=[[False, True],
... False])
>>> part.nodes_on_bdry
((False, True), False)
"""
if self.size == 0:
return True
nodes_on_bdry = []
for on_bdry in self.nodes_on_bdry_byaxis:
left, right = on_bdry
if left == right:
nodes_on_bdry.append(left)
else:
nodes_on_bdry.append((left, right))
if all(on_bdry == nodes_on_bdry[0] for on_bdry in nodes_on_bdry[1:]):
return nodes_on_bdry[0]
else:
return tuple(nodes_on_bdry)
@property
def nodes_on_bdry_byaxis(self):
"""Nested tuple of booleans for `nodes_on_bdry`.
This attribute is equivalent to `nodes_on_bdry`, but always in
the form of a nested tuple.
"""
return self.__nodes_on_bdry
# IntervalProd related pass-through methods and derived properties
# min, max and extent are for duck-typing purposes
@property
def min_pt(self):
"""Minimum coordinates of the partitioned set."""
return self.set.min_pt
@property
def max_pt(self):
"""Maximum coordinates of the partitioned set."""
return self.set.max_pt
@property
def mid_pt(self):
"""Midpoint of the partitioned set."""
return self.set.mid_pt
def min(self):
"""Return the minimum point of the partitioned set.
See Also
--------
odl.set.domain.IntervalProd.min
"""
return self.set.min()
def max(self):
"""Return the maximum point of the partitioned set.
See Also
--------
odl.set.domain.IntervalProd.max
"""
return self.set.max()
@property
def extent(self):
"""Return a vector containing the total extent (max - min)."""
return self.set.extent
@property
def grid(self):
"""`RectGrid` defining this partition."""
return self.__grid
# RectGrid related pass-through methods and derived properties
@property
def is_uniform_byaxis(self):
"""Boolean tuple showing uniformity of ``self.grid`` per axis.
Examples
--------
>>> part = nonuniform_partition([0, 1, 3], [1, 2, 3])
>>> part.is_uniform_byaxis
(False, True)
"""
return self.grid.is_uniform_byaxis
@property
def is_uniform(self):
"""``True`` if `grid` is uniform."""
return self.grid.is_uniform
@property
def has_isotropic_cells(self):
"""``True`` if `grid` is uniform and `cell sides` are all equal.
Always ``True`` for 1D partitions.
Examples
--------
>>> part = uniform_partition([0, -1], [1, 1], (5, 10))
>>> part.has_isotropic_cells
True
>>> part = uniform_partition([0, -1], [1, 1], (5, 5))
>>> part.has_isotropic_cells
False
"""
return self.is_uniform and np.allclose(self.cell_sides[:-1],
self.cell_sides[1:])
@property
def ndim(self):
"""Number of dimensions."""
return self.grid.ndim
@property
def shape(self):
"""Number of cells per axis, equal to ``self.grid.shape``."""
return self.grid.shape
@property
def size(self):
"""Total number of cells, equal to ``self.grid.size``."""
return self.grid.size
def __len__(self):
"""Return ``len(self)``.
Total number of cells along the first dimension.
Examples
--------
>>> partition = odl.uniform_partition([0, 0, 0],
... [1, 1, 1],
... shape=(2, 3, 4))
>>> len(partition)
2
See Also
--------
size : The total number of cells.
"""
return len(self.grid)
def points(self, order='C'):
"""Return the sampling grid points.
See Also
--------
RectGrid.points
"""
return self.grid.points(order)
@property
def meshgrid(self):
"""Return the sparse meshgrid of sampling points."""
return self.grid.meshgrid
@property
def coord_vectors(self):
"""Coordinate vectors of the grid."""
return self.grid.coord_vectors
# Further derived methods / properties
@property
def boundary_cell_fractions(self):
"""Return a tuple of contained fractions of boundary cells.
Since the outermost grid points can have any distance to the
boundary of the partitioned set, the "natural" outermost cell
around these points can either be cropped or extended. This
property is a tuple of (float, float) tuples, one entry per
dimension, where the fractions of the left- and rightmost
cells inside the set are stored. If a grid point lies exactly
on the boundary, the value is 1/2 since the cell is cut in half.
Otherwise, any value larger than 1/2 is possible.
Returns
-------
on_bdry : tuple of 2-tuples of floats
Each 2-tuple contains the fraction of the leftmost
(first entry) and rightmost (second entry) cell in the
partitioned set in the corresponding dimension.
See Also
--------
cell_boundary_vecs
Examples
--------
We create a partition of the rectangle [0, 1.5] x [-2, 2] with
the grid points [0, 1] x [-1, 0, 2]. The "natural" cells at the
boundary would be:
[-0.5, 0.5] and [0.5, 1.5] in the first axis
[-1.5, -0.5] and [1, 3] in the second axis
Thus, in the first axis, the fractions contained in [0, 1.5]
are 0.5 and 1, and in the second axis, [-2, 2] contains the
fractions 1.5 and 0.5.
>>> rect = odl.IntervalProd([0, -2], [1.5, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.boundary_cell_fractions
((0.5, 1.0), (1.5, 0.5))
"""
frac_list = []
for ax, (cvec, bmin, bmax) in enumerate(zip(
self.grid.coord_vectors, self.set.min_pt, self.set.max_pt)):
# Degenerate axes have a value of 1.0 (this is used as weight
# in integration formulas later)
if len(cvec) == 1:
frac_list.append((1.0, 1.0))
else:
left_frac = 0.5 + (cvec[0] - bmin) / (cvec[1] - cvec[0])
right_frac = 0.5 + (bmax - cvec[-1]) / (cvec[-1] - cvec[-2])
frac_list.append((left_frac, right_frac))
return tuple(frac_list)
@property
def cell_sizes_vecs(self):
"""Return the cell sizes as coordinate vectors.
Returns
-------
csizes : tuple of `numpy.ndarray`'s
The cell sizes per axis. The length of the vectors is the
same as the corresponding ``grid.coord_vectors``.
For axes with 1 grid point, cell size is set to 0.0.
Examples
--------
We create a partition of the rectangle [0, 1] x [-1, 2] into
2 x 3 cells with the grid points [0, 1] x [-1, 0, 2]. This
implies that the cell boundaries are given as
[0, 0.5, 1] x [-1, -0.5, 1, 2], hence the cell size vectors
are [0.5, 0.5] x [0.5, 1.5, 1]:
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. ]), array([-1. , -0.5, 1. , 2. ]))
>>> part.cell_sizes_vecs
(array([ 0.5, 0.5]), array([ 0.5, 1.5, 1. ]))
"""
csizes = []
for ax, cvec in enumerate(self.grid.coord_vectors):
if len(cvec) == 1:
csizes.append(np.array([0.0]))
else:
csize = np.empty_like(cvec)
csize[1:-1] = (cvec[2:] - cvec[:-2]) / 2.0
csize[0] = (cvec[0] + cvec[1]) / 2 - self.min()[ax]
csize[-1] = self.max()[ax] - (cvec[-2] + cvec[-1]) / 2
csizes.append(csize)
return tuple(csizes)
@property
def cell_sides(self):
"""Side lengths of all 'inner' cells of a uniform partition.
Only defined if ``self.grid`` is uniform.
Examples
--------
We create a partition of the rectangle [0, 1] x [-1, 2] into
3 x 3 cells, where the grid points lie on the boundary. This
means that the grid points are [0, 0.5, 1] x [-1, 0.5, 2],
i.e. the inner cell has side lengths 0.5 x 1.5:
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.uniform_grid([0, -1], [1, 2], (3, 3))
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_sides
array([ 0.5, 1.5])
"""
sides = self.grid.stride
sides[sides == 0] = self.extent[sides == 0]
return sides
@property
def cell_volume(self):
"""Volume of the 'inner' cells of a uniform partition.
Only defined if ``self.grid`` is uniform.
Examples
--------
We create a partition of the rectangle [0, 1] x [-1, 2] into
3 x 3 cells, where the grid points lie on the boundary. This
means that the grid points are [0, 0.5, 1] x [-1, 0.5, 2],
i.e. the inner cell has side lengths 0.5 x 1.5:
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.uniform_grid([0, -1], [1, 2], (3, 3))
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_sides
array([ 0.5, 1.5])
>>> part.cell_volume
0.75
"""
return 0.0 if self.size == 0 else float(np.prod(self.cell_sides))
def approx_equals(self, other, atol):
"""Return ``True`` in case of approximate equality.
Returns
-------
approx_eq : bool
``True`` if ``other`` is a `RectPartition` instance with
``self.set == other.set`` up to ``atol`` and
``self.grid == other.other`` up to ``atol``, ``False`` otherwise.
"""
if other is self:
return True
elif not isinstance(other, RectPartition):
return False
else:
return (self.set.approx_equals(other.set, atol=atol) and
self.grid.approx_equals(other.grid, atol=atol))
def __eq__(self, other):
"""Return ``self == other``."""
# Implemented separately for performance reasons
if other is self:
return True
# Optimized version for exact equality
return (type(other) is type(self) and
self.set == other.set and
self.grid == other.grid)
def __hash__(self):
"""Return ``hash(self)``."""
return hash((type(self), self.set, self.grid))
def __ne__(self, other):
"""Return ``self != other``."""
return not (self == other)
def __getitem__(self, indices):
"""Return ``self[indices]``.
Parameters
----------
indices : index expression
Object determining which parts of the partition to extract.
``None`` (new axis) and empty axes are not supported.
Examples
--------
        Take every second grid point. Note that it is in general non-uniform:
>>> partition = odl.uniform_partition(0, 10, 10)
>>> partition[::2]
nonuniform_partition(
[ 0.5, 2.5, 4.5, 6.5, 8.5],
min_pt=0.0, max_pt=10.0
)
A more advanced example is:
>>> intvp = odl.IntervalProd([-1, 1, 4, 2], [3, 6, 5, 7])
>>> grid = odl.RectGrid([-1, 0, 3], [2, 4], [5], [2, 4, 7])
>>> part = odl.RectPartition(intvp, grid)
>>> part
nonuniform_partition(
[-1., 0., 3.],
[ 2., 4.],
[ 5.],
[ 2., 4., 7.],
min_pt=[-1., 1., 4., 2.], max_pt=[ 3., 6., 5., 7.]
)
Take an advanced slice (every second along the first axis,
the last in the last axis and everything in between):
>>> part[::2, ..., -1]
nonuniform_partition(
[-1., 3.],
[ 2., 4.],
[ 5.],
[ 7.],
min_pt=[-1. , 1. , 4. , 5.5], max_pt=[ 3., 6., 5., 7.]
)
Too few indices are filled up with an ellipsis from the right:
>>> part[1]
nonuniform_partition(
[ 0.],
[ 2., 4.],
[ 5.],
[ 2., 4., 7.],
min_pt=[-0.5, 1. , 4. , 2. ], max_pt=[ 1.5, 6. , 5. , 7. ]
)
Colons etc work as expected:
>>> part[:] == part
True
>>> part[:, :, :] == part
True
>>> part[...] == part
True
"""
# Special case of index list: slice along first axis
if isinstance(indices, list):
if indices == []:
new_min_pt = new_max_pt = []
else:
new_min_pt = [self.cell_boundary_vecs[0][:-1][indices][0]]
new_max_pt = [self.cell_boundary_vecs[0][1:][indices][-1]]
for cvec in self.cell_boundary_vecs[1:]:
new_min_pt.append(cvec[0])
new_max_pt.append(cvec[-1])
new_intvp = IntervalProd(new_min_pt, new_max_pt)
new_grid = self.grid[indices]
return RectPartition(new_intvp, new_grid)
indices = normalized_index_expression(indices, self.shape,
int_to_slice=True)
# Build the new partition
new_min_pt, new_max_pt = [], []
for cvec, idx in zip(self.cell_boundary_vecs, indices):
# Determine the subinterval min_pt and max_pt vectors. Take the
# first min_pt as new min_pt and the last max_pt as new max_pt.
if isinstance(idx, slice):
# Only use the slice to extract min and max without using
# the step size. This is in order for expressions like
# self[::2] to not change the maximum.
idx = slice(idx.start, idx.stop, None)
sub_min_pt = cvec[:-1][idx]
sub_max_pt = cvec[1:][idx]
new_min_pt.append(sub_min_pt[0])
new_max_pt.append(sub_max_pt[-1])
new_intvp = IntervalProd(new_min_pt, new_max_pt)
new_grid = self.grid[indices]
return RectPartition(new_intvp, new_grid)
def insert(self, index, *parts):
"""Return a copy with ``parts`` inserted before ``index``.
The given partitions are inserted (as a block) into ``self``,
yielding a new partition whose number of dimensions is the sum of
the numbers of dimensions of all involved partitions.
Note that no changes are made in-place.
Parameters
----------
index : int
Index of the dimension before which ``other`` is to
be inserted. Negative indices count backwards from
``self.ndim``.
part1, ..., partN : `RectPartition`
Partitions to be inserted into ``self``.
Returns
-------
newpart : `RectPartition`
The enlarged partition.
Examples
--------
>>> part1 = odl.uniform_partition([0, -1], [1, 2], (3, 3))
>>> part2 = odl.uniform_partition(0, 1, 5)
>>> part1.insert(1, part2)
uniform_partition([ 0., 0., -1.], [ 1., 1., 2.], (3, 5, 3))
See Also
--------
append
"""
if not all(isinstance(p, RectPartition) for p in parts):
raise TypeError('`parts` must all be `RectPartition` instances, '
'got ({})'
''.format(', '.join(repr(p) for p in parts)))
newgrid = self.grid.insert(index, *(p.grid for p in parts))
newset = self.set.insert(index, *(p.set for p in parts))
return RectPartition(newset, newgrid)
def append(self, *parts):
"""Insert ``parts`` at the end as a block.
Parameters
----------
part1, ..., partN : `RectPartition`
Partitions to be appended to ``self``.
Returns
-------
newpart : `RectPartition`
The enlarged partition.
Examples
--------
>>> part1 = odl.uniform_partition(-1, 2, 3)
>>> part2 = odl.uniform_partition(0, 1, 5)
>>> part1.append(part2)
uniform_partition([-1., 0.], [ 2., 1.], (3, 5))
>>> part1.append(part2, part2)
uniform_partition([-1., 0., 0.], [ 2., 1., 1.], (3, 5, 5))
See Also
--------
insert
"""
return self.insert(self.ndim, *parts)
def squeeze(self, axis=None):
"""Return the partition with removed degenerate (length 1) dimensions.
Parameters
----------
axis : None or index expression, optional
Subset of the axes to squeeze. Default: All axes.
Returns
-------
squeezed : `RectPartition`
Squeezed partition.
Examples
--------
>>> p = odl.uniform_partition([0, -1], [1, 2], (3, 1))
>>> p.squeeze()
uniform_partition(0.0, 1.0, 3)
The axis argument can be used to only squeeze some axes (if applicable)
>>> p.squeeze(axis=0)
uniform_partition([ 0., -1.], [ 1., 2.], (3, 1))
Notes
-----
This is not equivalent to
        ``RectPartition(self.set.squeeze(), self.grid.squeeze())`` since the
        definition of degenerate is different in sets and grids. This method
        follows the definition used in grids, that is, an axis is degenerate if
it has only one element.
See Also
--------
RectGrid.squeeze
IntervalProd.squeeze
"""
if axis is None:
rng = range(self.ndim)
else:
rng = list(np.atleast_1d(np.arange(self.ndim)[axis]))
new_indcs = [i for i in range(self.ndim)
if i not in rng or self.grid.nondegen_byaxis[i]]
newset = self.set[new_indcs]
return RectPartition(newset, self.grid.squeeze(axis))
def index(self, value, floating=False):
"""Return the index of a value in the domain.
Parameters
----------
value : ``self.set`` element
Point whose index to find.
floating : bool, optional
If True, then the index should also give the position inside the
voxel. This is given by returning the integer valued index of the
voxel plus the distance from the left cell boundary as a fraction
of the full cell size.
Returns
-------
index : int, float, tuple of int or tuple of float
Index of the value, as counted from the left.
If ``self.ndim > 1`` the result is a tuple, else a scalar.
If ``floating=True`` the scalar is a float, else an int.
Examples
--------
Get the indices of start and end:
>>> p = odl.uniform_partition(0, 2, 5)
>>> p.index(0)
0
>>> p.index(2)
4
For points inside voxels, the index of the containing cell is returned:
>>> p.index(0.2)
0
By using the ``floating`` argument, partial positions inside the voxels
can instead be determined:
>>> p.index(0.2, floating=True)
0.5
These indices work with indexing, extracting the voxel in which the
point lies:
>>> p[p.index(0.1)]
uniform_partition(0.0, 0.4, 1)
The same principle also works in higher dimensions:
>>> p = uniform_partition([0, -1], [1, 2], (4, 1))
>>> p.index([0.5, 2])
(2, 0)
>>> p[p.index([0.5, 2])]
uniform_partition([ 0.5, -1. ], [ 0.75, 2. ], (1, 1))
"""
value = np.atleast_1d(self.set.element(value))
result = []
for val, cell_bdry_vec in zip(value, self.cell_boundary_vecs):
ind = np.searchsorted(cell_bdry_vec, val)
if floating:
if cell_bdry_vec[ind] == val:
# Value is on top of edge
result.append(float(ind))
else:
# interpolate between
csize = float(cell_bdry_vec[ind] - cell_bdry_vec[ind - 1])
result.append(ind - (cell_bdry_vec[ind] - val) / csize)
else:
if cell_bdry_vec[ind] == val and ind != len(cell_bdry_vec) - 1:
# Value is on top of edge, but not last edge
result.append(ind)
else:
result.append(ind - 1)
if self.ndim == 1:
result = result[0]
else:
result = tuple(result)
return result
@property
def byaxis(self):
"""Object to index ``self`` along axes.
Examples
--------
Indexing with integers or slices:
>>> p = odl.uniform_partition([0, 1, 2], [1, 3, 5], (3, 5, 6))
>>> p.byaxis[0]
uniform_partition(0.0, 1.0, 3)
>>> p.byaxis[1]
uniform_partition(1.0, 3.0, 5)
>>> p.byaxis[2]
uniform_partition(2.0, 5.0, 6)
>>> p.byaxis[:] == p
True
>>> p.byaxis[1:]
uniform_partition([ 1., 2.], [ 3., 5.], (5, 6))
Lists can be used to stack subpartitions arbitrarily:
>>> p.byaxis[[0, 2, 0]]
uniform_partition([ 0., 2., 0.], [ 1., 5., 1.], (3, 6, 3))
"""
partition = self
class RectPartitionByAxis(object):
"""Helper class for accessing `RectPartition` by axis."""
def __getitem__(self, indices):
"""Return ``self[indices]``."""
try:
iter(indices)
except TypeError:
# Slice or integer
slc = np.zeros(partition.ndim, dtype=object)
slc[indices] = slice(None)
squeeze_axes = np.where(slc == 0)[0]
newpart = partition[tuple(slc)].squeeze(squeeze_axes)
else:
# Sequence, stack together from single-integer indexing
indices = [int(i) for i in indices]
byaxis = partition.byaxis
parts = [byaxis[i] for i in indices]
if not parts:
newpart = uniform_partition([], [], ())
else:
newpart = parts[0].append(*(parts[1:]))
return newpart
def __repr__(self):
"""Return ``repr(self)``.
Examples
--------
>>> p = odl.uniform_partition(0, 1, 5)
>>> p.byaxis
uniform_partition(0, 1, 5).byaxis
"""
return '{!r}.byaxis'.format(partition)
return RectPartitionByAxis()
def __repr__(self):
"""Return ``repr(self)``."""
if self.ndim == 0:
return 'uniform_partition([], [], ())'
bdry_fracs = np.vstack(self.boundary_cell_fractions)
default_bdry_fracs = np.all(np.isclose(bdry_fracs, 0.5) |
np.isclose(bdry_fracs, 1.0))
# Get default shifts of min_pt and max_pt from corresponding
# grid points
csizes_l = np.fromiter((s[0] for s in self.cell_sizes_vecs),
dtype=float)
csizes_r = np.fromiter((s[-1] for s in self.cell_sizes_vecs),
dtype=float)
shift_l = ((bdry_fracs[:, 0].astype(float).squeeze() - 0.5) *
csizes_l)
shift_r = ((bdry_fracs[:, 1].astype(float).squeeze() - 0.5) *
csizes_r)
if self.is_uniform and default_bdry_fracs:
ctor = 'uniform_partition'
if self.ndim == 1:
posargs = [self.min_pt[0], self.max_pt[0], self.shape[0]]
posmod = [':.4', ':.4', '']
else:
posargs = [self.min_pt, self.max_pt, self.shape]
posmod = [array_str, array_str, '']
optargs = [('nodes_on_bdry', self.nodes_on_bdry, False)]
with npy_printoptions(precision=4):
sig_str = signature_string(posargs, optargs, mod=[posmod, ''])
return '{}({})'.format(ctor, sig_str)
else:
ctor = 'nonuniform_partition'
posargs = self.coord_vectors
posmod = array_str
optargs = []
# Defaults with and without nodes_on_bdry option
nodes_def_min_pt = self.grid.min_pt - shift_l
nodes_def_max_pt = self.grid.max_pt + shift_r
def_min_pt = self.grid.min_pt - 0.5 * csizes_l
def_max_pt = self.grid.max_pt + 0.5 * csizes_r
# Since min/max_pt and nodes_on_bdry are mutex, we need a
# couple of cases here
optmod = []
if (np.allclose(self.min_pt, nodes_def_min_pt) and
np.allclose(self.max_pt, nodes_def_max_pt)):
# Append nodes_on_bdry to list of optional args
optargs.append(('nodes_on_bdry', self.nodes_on_bdry, False))
optmod.append('')
else:
# Append min/max_pt to list of optional args if not
# default (need check manually because array comparison is
# ambiguous)
if not np.allclose(self.min_pt, def_min_pt):
if self.ndim == 1:
optargs.append(('min_pt', self.min_pt[0], None))
optmod.append(':.4')
else:
with npy_printoptions(precision=4):
optargs.append(
('min_pt', array_str(self.min_pt), ''))
optmod.append('!s')
if not np.allclose(self.max_pt, def_max_pt):
if self.ndim == 1:
optargs.append(('max_pt', self.max_pt[0], None))
optmod.append(':.4')
else:
with npy_printoptions(precision=4):
optargs.append(
('max_pt', array_str(self.max_pt), ''))
optmod.append('!s')
sig_str = signature_string(posargs, optargs, mod=[posmod, optmod],
sep=[',\n', ', ', ',\n'])
return '{}(\n{}\n)'.format(ctor, indent(sig_str))
def __str__(self):
"""Return ``str(self)``."""
return repr(self)
def uniform_partition_fromintv(intv_prod, shape, nodes_on_bdry=False):
"""Return a partition of an interval product into equally sized cells.
Parameters
----------
intv_prod : `IntervalProd`
Interval product to be partitioned
shape : int or sequence of ints
Number of nodes per axis. For 1d intervals, a single integer
can be specified.
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
an entry may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``intv_prod.ndim``.
A single boolean is interpreted as a global choice for all
boundaries.
See Also
--------
uniform_partition_fromgrid
Examples
--------
By default, no grid points are placed on the boundary:
>>> interval = odl.IntervalProd(0, 1)
>>> part = odl.uniform_partition_fromintv(interval, 4)
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0.125, 0.375, 0.625, 0.875]),)
This can be changed with the nodes_on_bdry parameter:
>>> part = odl.uniform_partition_fromintv(interval, 3,
... nodes_on_bdry=True)
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
We can specify this per axis, too. In this case we choose both
in the first axis and only the rightmost in the second:
>>> rect = odl.IntervalProd([0, 0], [1, 1])
>>> part = odl.uniform_partition_fromintv(
... rect, (3, 3), nodes_on_bdry=(True, (False, True)))
...
>>> part.cell_boundary_vecs[0] # first axis, as above
array([ 0. , 0.25, 0.75, 1. ])
>>> part.grid.coord_vectors[0]
array([ 0. , 0.5, 1. ])
>>> part.cell_boundary_vecs[1] # second, asymmetric axis
array([ 0. , 0.4, 0.8, 1. ])
>>> part.grid.coord_vectors[1]
array([ 0.2, 0.6, 1. ])
"""
grid = uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=nodes_on_bdry)
return RectPartition(intv_prod, grid)
def uniform_partition_fromgrid(grid, min_pt=None, max_pt=None):
"""Return a partition of an interval product based on a given grid.
This method is complementary to `uniform_partition_fromintv` in that
it infers the set to be partitioned from a given grid and optional
parameters for ``min_pt`` and ``max_pt`` of the set.
Parameters
----------
grid : `RectGrid`
Grid on which the partition is based
min_pt, max_pt : float, sequence of floats, or dict, optional
Spatial points defining the lower/upper limits of the intervals
to be partitioned. The points can be specified in two ways:
float or sequence: The values are used directly as ``min_pt``
and/or ``max_pt``.
dict: Index-value pairs specifying an axis and a spatial
coordinate to be used in that axis. In axes which are not a key
in the dictionary, the coordinate for the vector is calculated
as::
min_pt = x[0] - (x[1] - x[0]) / 2
max_pt = x[-1] + (x[-1] - x[-2]) / 2
See ``Examples`` below.
In general, ``min_pt`` may not be larger than ``grid.min_pt``,
and ``max_pt`` not smaller than ``grid.max_pt`` in any component.
``None`` is equivalent to an empty dictionary, i.e. the values
are calculated in each dimension.
See Also
--------
uniform_partition_fromintv
Examples
--------
Have ``min_pt`` and ``max_pt`` of the bounding box automatically
calculated:
>>> grid = odl.uniform_grid(0, 1, 3)
>>> grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
>>> part = odl.uniform_partition_fromgrid(grid)
>>> part.cell_boundary_vecs
(array([-0.25, 0.25, 0.75, 1.25]),)
``min_pt`` and ``max_pt`` can be given explicitly:
>>> part = odl.uniform_partition_fromgrid(grid, min_pt=0, max_pt=1)
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.75, 1. ]),)
Using dictionaries, selective axes can be explicitly set. The
keys refer to axes, the values to the coordinates to use:
>>> grid = odl.uniform_grid([0, 0], [1, 1], (3, 3))
>>> part = odl.uniform_partition_fromgrid(grid,
... min_pt={0: -1}, max_pt={-1: 3})
>>> part.cell_boundary_vecs[0]
array([-1. , 0.25, 0.75, 1.25])
>>> part.cell_boundary_vecs[1]
array([-0.25, 0.25, 0.75, 3. ])
"""
# Make dictionaries from `min_pt` and `max_pt` and fill with `None` where
# no value is given (taking negative indices into account)
if min_pt is None:
min_pt = {i: None for i in range(grid.ndim)}
elif not hasattr(min_pt, 'items'): # array-like
min_pt = np.atleast_1d(min_pt)
min_pt = {i: float(v) for i, v in enumerate(min_pt)}
else:
min_pt.update({i: None for i in range(grid.ndim)
if i not in min_pt and i - grid.ndim not in min_pt})
if max_pt is None:
max_pt = {i: None for i in range(grid.ndim)}
elif not hasattr(max_pt, 'items'):
max_pt = np.atleast_1d(max_pt)
max_pt = {i: float(v) for i, v in enumerate(max_pt)}
else:
max_pt.update({i: None for i in range(grid.ndim)
if i not in max_pt and i - grid.ndim not in max_pt})
# Set the values in the vectors by computing (None) or directly from the
# given vectors (otherwise).
min_pt_vec = np.empty(grid.ndim)
for ax, xmin in min_pt.items():
if xmin is None:
cvec = grid.coord_vectors[ax]
if len(cvec) == 1:
raise ValueError('in axis {}: cannot calculate `min_pt` with '
'only 1 grid point'.format(ax))
min_pt_vec[ax] = cvec[0] - (cvec[1] - cvec[0]) / 2
else:
min_pt_vec[ax] = xmin
max_pt_vec = np.empty(grid.ndim)
for ax, xmax in max_pt.items():
if xmax is None:
cvec = grid.coord_vectors[ax]
if len(cvec) == 1:
raise ValueError('in axis {}: cannot calculate `max_pt` with '
'only 1 grid point'.format(ax))
max_pt_vec[ax] = cvec[-1] + (cvec[-1] - cvec[-2]) / 2
else:
max_pt_vec[ax] = xmax
return RectPartition(IntervalProd(min_pt_vec, max_pt_vec), grid)
def uniform_partition(min_pt=None, max_pt=None, shape=None, cell_sides=None,
nodes_on_bdry=False):
"""Return a partition with equally sized cells.
Parameters
----------
min_pt, max_pt : float or sequence of float, optional
Vectors defining the lower/upper limits of the intervals in an
`IntervalProd` (a rectangular box). ``None`` entries mean
"compute the value".
shape : int or sequence of ints, optional
Number of nodes per axis. ``None`` entries mean
"compute the value".
cell_sides : float or sequence of floats, optional
Side length of the partition cells per axis. ``None`` entries mean
"compute the value".
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
an entry may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``array.ndim``.
A single boolean is interpreted as a global choice for all
boundaries.
Notes
-----
In each axis, 3 of the 4 possible parameters ``min_pt``, ``max_pt``,
``shape`` and ``cell_sides`` must be given. If all four are
provided, they are checked for consistency.
See Also
--------
uniform_partition_fromintv : partition an existing set
uniform_partition_fromgrid : use an existing grid as basis
Examples
--------
Any combination of three of the four parameters can be used for
creation of a partition:
>>> part = odl.uniform_partition(min_pt=0, max_pt=2, shape=4)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(min_pt=0, shape=4, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(max_pt=2, shape=4, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(min_pt=0, max_pt=2, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
In higher dimensions, the parameters can be given differently in
each axis. Where ``None`` is given, the value will be computed:
>>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
... shape=[4, 2])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
>>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
... shape=[None, 2], cell_sides=[0.25, None])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
>>> part = odl.uniform_partition(min_pt=[0, None], max_pt=[None, 2],
... shape=[4, 2], cell_sides=[0.25, 1])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
By default, no grid points are placed on the boundary:
>>> part = odl.uniform_partition(0, 1, 4)
>>> part.nodes_on_bdry
False
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0.125, 0.375, 0.625, 0.875]),)
This can be changed with the nodes_on_bdry parameter:
>>> part = odl.uniform_partition(0, 1, 3, nodes_on_bdry=True)
>>> part.nodes_on_bdry
True
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
We can specify this per axis, too. In this case we choose both
in the first axis and only the rightmost in the second:
>>> part = odl.uniform_partition([0, 0], [1, 1], (3, 3),
... nodes_on_bdry=(True, (False, True)))
...
>>> part.cell_boundary_vecs[0] # first axis, as above
array([ 0. , 0.25, 0.75, 1. ])
>>> part.grid.coord_vectors[0]
array([ 0. , 0.5, 1. ])
>>> part.cell_boundary_vecs[1] # second, asymmetric axis
array([ 0. , 0.4, 0.8, 1. ])
>>> part.grid.coord_vectors[1]
array([ 0.2, 0.6, 1. ])
"""
# Normalize partition parameters
    # np.size(None) == 1, which would skew the sizes, so exclude None entries
sizes = [np.size(p) for p in (min_pt, max_pt, shape, cell_sides)
if p is not None]
ndim = int(np.max(sizes))
min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
keep_none=True)
max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
keep_none=True)
shape = normalized_scalar_param_list(shape, ndim, param_conv=safe_int_conv,
keep_none=True)
cell_sides = normalized_scalar_param_list(cell_sides, ndim,
param_conv=float, keep_none=True)
nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)
# Calculate the missing parameters in min_pt, max_pt, shape
for i, (xmin, xmax, n, dx, on_bdry) in enumerate(
zip(min_pt, max_pt, shape, cell_sides, nodes_on_bdry)):
num_params = sum(p is not None for p in (xmin, xmax, n, dx))
if num_params < 3:
raise ValueError('in axis {}: expected at least 3 of the '
'parameters `min_pt`, `max_pt`, `shape`, '
'`cell_sides`, got {}'
''.format(i, num_params))
# Unpack the tuple if possible, else use bool globally for this axis
try:
bdry_l, bdry_r = on_bdry
except TypeError:
bdry_l = bdry_r = on_bdry
# For each node on the boundary, we subtract 1/2 from the number of
# full cells between min_pt and max_pt.
if xmin is None:
min_pt[i] = xmax - (n - sum([bdry_l, bdry_r]) / 2.0) * dx
elif xmax is None:
max_pt[i] = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
elif n is None:
            # Add the boundary contribution, since (xmax - xmin) / dx alone
            # gives the reduced number of cells.
n_calc = (xmax - xmin) / dx + sum([bdry_l, bdry_r]) / 2.0
n_round = int(round(n_calc))
if abs(n_calc - n_round) > 1e-5:
raise ValueError('in axis {}: calculated number of nodes '
'{} = ({} - {}) / {} too far from integer'
''.format(i, n_calc, xmax, xmin, dx))
shape[i] = n_round
elif dx is None:
pass
else:
xmax_calc = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
if not np.isclose(xmax, xmax_calc):
raise ValueError('in axis {}: calculated endpoint '
'{} = {} + {} * {} too far from given '
'endpoint {}.'
''.format(i, xmax_calc, xmin, n, dx, xmax))
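    # Worked example (illustrative, not from the original source): with
    # xmin=0, n=4, dx=0.5 and no nodes on the boundary, the check above uses
    # xmax_calc = 0 + (4 - 0/2) * 0.5 = 2.0; with both endpoints on the
    # boundary it becomes 0 + (4 - 1) * 0.5 = 1.5.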
return uniform_partition_fromintv(
IntervalProd(min_pt, max_pt), shape, nodes_on_bdry)
def nonuniform_partition(*coord_vecs, **kwargs):
"""Return a partition with un-equally sized cells.
Parameters
----------
coord_vecs1, ... coord_vecsN : `array-like`
Arrays of coordinates of the mid-points of the partition cells.
min_pt, max_pt : float or sequence of floats, optional
Vectors defining the lower/upper limits of the intervals in an
`IntervalProd` (a rectangular box). ``None`` entries mean
"compute the value".
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
        an entry may consist of a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
        sequence must match the number of axes.
A single boolean is interpreted as a global choice for all
boundaries.
        Cannot be given together with ``min_pt`` or ``max_pt`` in a given
        axis since they determine the same thing.
Default: ``False``
See Also
--------
uniform_partition : uniformly spaced points
uniform_partition_fromintv : partition an existing set
uniform_partition_fromgrid : use an existing grid as basis
Examples
--------
With uniformly spaced points the result is the same as a
uniform partition:
>>> odl.nonuniform_partition([0, 1, 2, 3])
uniform_partition(-0.5, 3.5, 4)
>>> odl.nonuniform_partition([0, 1, 2, 3], [1, 2])
uniform_partition([-0.5, 0.5], [ 3.5, 2.5], (4, 2))
If the points are not uniformly spaced, a nonuniform partition is
created. Note that the containing interval is calculated by assuming
that the points are in the middle of the sub-intervals:
>>> odl.nonuniform_partition([0, 1, 3])
nonuniform_partition(
[ 0., 1., 3.]
)
Higher dimensional partitions are created by specifying the gridpoints
along each dimension:
>>> odl.nonuniform_partition([0, 1, 3], [1, 2])
nonuniform_partition(
[ 0., 1., 3.],
[ 1., 2.]
)
    Partitions with a single element are by default degenerate:
>>> odl.nonuniform_partition(1)
uniform_partition(1.0, 1.0, 1, nodes_on_bdry=True)
If the endpoints should be on the boundary, the ``nodes_on_bdry`` parameter
can be used:
>>> odl.nonuniform_partition([0, 1, 3], nodes_on_bdry=True)
nonuniform_partition(
[ 0., 1., 3.],
nodes_on_bdry=True
)
Users can also manually specify the containing intervals dimensions by
using the ``min_pt`` and ``max_pt`` arguments:
>>> odl.nonuniform_partition([0, 1, 3], min_pt=-2, max_pt=3)
nonuniform_partition(
[ 0., 1., 3.],
min_pt=-2.0, max_pt=3.0
)
"""
# Get parameters from kwargs
min_pt = kwargs.pop('min_pt', None)
max_pt = kwargs.pop('max_pt', None)
nodes_on_bdry = kwargs.pop('nodes_on_bdry', False)
# np.size(None) == 1
sizes = [len(coord_vecs)] + [np.size(p) for p in (min_pt, max_pt)]
ndim = int(np.max(sizes))
min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
keep_none=True)
max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
keep_none=True)
nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)
# Calculate the missing parameters in min_pt, max_pt
for i, (xmin, xmax, (bdry_l, bdry_r), coords) in enumerate(
zip(min_pt, max_pt, nodes_on_bdry, coord_vecs)):
# Check input for redundancy
if xmin is not None and bdry_l:
raise ValueError('in axis {}: got both `min_pt` and '
'`nodes_on_bdry=True`'.format(i))
if xmax is not None and bdry_r:
raise ValueError('in axis {}: got both `max_pt` and '
'`nodes_on_bdry=True`'.format(i))
# Handle length 1 inputs
coords = np.array(coords, copy=False, ndmin=1)
# Compute boundary position if not given by user
if xmin is None:
if bdry_l or len(coords) == 1:
min_pt[i] = coords[0]
else:
min_pt[i] = coords[0] - (coords[1] - coords[0]) / 2.0
if xmax is None:
if bdry_r or len(coords) == 1:
max_pt[i] = coords[-1]
else:
max_pt[i] = coords[-1] + (coords[-1] - coords[-2]) / 2.0
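    # Illustrative check (not from the original source): for coords [0, 1, 3]
    # with no boundary nodes requested, min_pt = 0 - (1 - 0)/2 = -0.5 and
    # max_pt = 3 + (3 - 1)/2 = 4.0.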
interval = IntervalProd(min_pt, max_pt)
grid = RectGrid(*coord_vecs)
return RectPartition(interval, grid)
if __name__ == '__main__':
from odl.util.testutils import run_doctests
run_doctests()
| mpl-2.0 | -1,534,553,825,121,315,600 | 34.962132 | 79 | 0.526462 | false |
ahmed-mahran/hue | desktop/core/ext-py/markdown/markdown/odict.py | 143 | 5157 | class OrderedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
Copied from Django's SortedDict with some modifications.
"""
def __new__(cls, *args, **kwargs):
instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
super(OrderedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
for key, value in data:
if key not in self.keyOrder:
self.keyOrder.append(key)
def __deepcopy__(self, memo):
from copy import deepcopy
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
super(OrderedDict, self).__setitem__(key, value)
if key not in self.keyOrder:
self.keyOrder.append(key)
def __delitem__(self, key):
super(OrderedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
for k in self.keyOrder:
yield k
def pop(self, k, *args):
result = super(OrderedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(OrderedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, super(OrderedDict, self).__getitem__(key)
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]
def itervalues(self):
for key in self.keyOrder:
yield super(OrderedDict, self).__getitem__(key)
def update(self, dict_):
for k, v in dict_.items():
self.__setitem__(k, v)
def setdefault(self, key, default):
if key not in self.keyOrder:
self.keyOrder.append(key)
return super(OrderedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Return the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Insert the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(OrderedDict, self).__setitem__(key, value)
def copy(self):
"""Return a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
        Replace the normal dict.__repr__ with a version that returns the keys
        in their insertion order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(OrderedDict, self).clear()
self.keyOrder = []
def index(self, key):
""" Return the index of a given key. """
return self.keyOrder.index(key)
def index_for_location(self, location):
""" Return index or None for a given location. """
if location == '_begin':
i = 0
elif location == '_end':
i = None
elif location.startswith('<') or location.startswith('>'):
i = self.index(location[1:])
if location.startswith('>'):
if i >= len(self):
# last item
i = None
else:
i += 1
else:
raise ValueError('Not a valid location: "%s". Location key '
'must start with a ">" or "<".' % location)
return i
def add(self, key, value, location):
""" Insert by key location. """
i = self.index_for_location(location)
if i is not None:
self.insert(i, key, value)
else:
self.__setitem__(key, value)
def link(self, key, location):
""" Change location of an existing item. """
n = self.keyOrder.index(key)
del self.keyOrder[n]
i = self.index_for_location(location)
try:
if i is not None:
self.keyOrder.insert(i, key)
else:
self.keyOrder.append(key)
        except Exception:
            # restore to prevent data loss and reraise
            self.keyOrder.insert(n, key)
            raise
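# Illustrative usage sketch (added for clarity, not part of the original
# module): the location syntax understood by ``add`` and ``link`` is
# '_begin', '_end', '<key' (insert before key) or '>key' (insert after key).
#
#     od = OrderedDict()
#     od['first'] = 1
#     od.add('last', 3, '_end')       # append at the end
#     od.add('middle', 2, '>first')   # insert right after 'first'
#     od.link('last', '<first')       # move 'last' before 'first'
#     assert od.keys() == ['last', 'first', 'middle']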
| apache-2.0 | 4,285,468,236,173,810,700 | 30.833333 | 79 | 0.536746 | false |
techhat/libcloud | libcloud/test/common/test_openstack_identity.py | 8 | 33173 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import datetime
try:
import simplejson as json
except ImportError:
import json
from mock import Mock
from libcloud.utils.py3 import httplib
from libcloud.common.openstack import OpenStackBaseConnection
from libcloud.common.openstack_identity import AUTH_TOKEN_EXPIRES_GRACE_SECONDS
from libcloud.common.openstack_identity import get_class_for_auth_version
from libcloud.common.openstack_identity import OpenStackServiceCatalog
from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection
from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection
from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection_OIDC_access_token
from libcloud.common.openstack_identity import OpenStackIdentityUser
from libcloud.compute.drivers.openstack import OpenStack_1_0_NodeDriver
from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection_VOMS
from libcloud.test import unittest
from libcloud.test import MockHttp
from libcloud.test.secrets import OPENSTACK_PARAMS
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.test.compute.test_openstack import OpenStackMockHttp
from libcloud.test.compute.test_openstack import OpenStack_2_0_MockHttp
class OpenStackIdentityConnectionTestCase(unittest.TestCase):
def setUp(self):
OpenStackBaseConnection.auth_url = None
OpenStackBaseConnection.conn_class = OpenStackMockHttp
def test_auth_url_is_correctly_assembled(self):
tuples = [
('1.0', OpenStackMockHttp),
('1.1', OpenStackMockHttp),
('2.0', OpenStack_2_0_MockHttp),
('2.0_apikey', OpenStack_2_0_MockHttp),
('2.0_password', OpenStack_2_0_MockHttp)
]
APPEND = 0
NOTAPPEND = 1
auth_urls = [
('https://auth.api.example.com', APPEND, ''),
('https://auth.api.example.com/', NOTAPPEND, '/'),
('https://auth.api.example.com/foo/bar', NOTAPPEND, '/foo/bar'),
('https://auth.api.example.com/foo/bar/', NOTAPPEND, '/foo/bar/')
]
actions = {
'1.0': '/v1.0',
'1.1': '/v1.1/auth',
'2.0': '/v2.0/tokens',
'2.0_apikey': '/v2.0/tokens',
'2.0_password': '/v2.0/tokens'
}
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
for (auth_version, mock_http_class) in tuples:
for (url, should_append_default_path, expected_path) in auth_urls:
connection = \
self._get_mock_connection(mock_http_class=mock_http_class,
auth_url=url)
auth_url = connection.auth_url
cls = get_class_for_auth_version(auth_version=auth_version)
osa = cls(auth_url=auth_url,
user_id=user_id,
key=key,
parent_conn=connection)
try:
osa = osa.authenticate()
except:
pass
if (should_append_default_path == APPEND):
expected_path = actions[auth_version]
self.assertEqual(osa.action, expected_path)
def test_basic_authentication(self):
tuples = [
('1.0', OpenStackMockHttp),
('1.1', OpenStackMockHttp),
('2.0', OpenStack_2_0_MockHttp),
('2.0_apikey', OpenStack_2_0_MockHttp),
('2.0_password', OpenStack_2_0_MockHttp)
]
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
for (auth_version, mock_http_class) in tuples:
connection = \
self._get_mock_connection(mock_http_class=mock_http_class)
auth_url = connection.auth_url
cls = get_class_for_auth_version(auth_version=auth_version)
osa = cls(auth_url=auth_url, user_id=user_id, key=key,
parent_conn=connection)
self.assertEqual(osa.urls, {})
self.assertEqual(osa.auth_token, None)
self.assertEqual(osa.auth_user_info, None)
osa = osa.authenticate()
self.assertTrue(len(osa.urls) >= 1)
self.assertTrue(osa.auth_token is not None)
if auth_version in ['1.1', '2.0', '2.0_apikey', '2.0_password']:
self.assertTrue(osa.auth_token_expires is not None)
if auth_version in ['2.0', '2.0_apikey', '2.0_password']:
self.assertTrue(osa.auth_user_info is not None)
def test_token_expiration_and_force_reauthentication(self):
user_id = OPENSTACK_PARAMS[0]
key = OPENSTACK_PARAMS[1]
connection = self._get_mock_connection(OpenStack_2_0_MockHttp)
auth_url = connection.auth_url
yesterday = datetime.datetime.today() - datetime.timedelta(1)
tomorrow = datetime.datetime.today() + datetime.timedelta(1)
osa = OpenStackIdentity_2_0_Connection(auth_url=auth_url,
user_id=user_id,
key=key,
parent_conn=connection)
mocked_auth_method = Mock(wraps=osa._authenticate_2_0_with_body)
osa._authenticate_2_0_with_body = mocked_auth_method
# Force re-auth, expired token
osa.auth_token = None
osa.auth_token_expires = yesterday
count = 5
for i in range(0, count):
osa.authenticate(force=True)
self.assertEqual(mocked_auth_method.call_count, count)
# No force reauth, expired token
osa.auth_token = None
osa.auth_token_expires = yesterday
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
osa.authenticate(force=False)
self.assertEqual(mocked_auth_method.call_count, 1)
# No force reauth, valid / non-expired token
osa.auth_token = None
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
osa.authenticate(force=False)
if i == 0:
osa.auth_token_expires = tomorrow
self.assertEqual(mocked_auth_method.call_count, 1)
# No force reauth, valid / non-expired token which is about to expire in
# less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS
soon = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1)
osa.auth_token = None
mocked_auth_method.call_count = 0
self.assertEqual(mocked_auth_method.call_count, 0)
for i in range(0, count):
if i == 0:
osa.auth_token_expires = soon
osa.authenticate(force=False)
self.assertEqual(mocked_auth_method.call_count, 1)
def _get_mock_connection(self, mock_http_class, auth_url=None):
OpenStackBaseConnection.conn_class = mock_http_class
if auth_url is None:
auth_url = "https://auth.api.example.com"
OpenStackBaseConnection.auth_url = auth_url
connection = OpenStackBaseConnection(*OPENSTACK_PARAMS)
connection._ex_force_base_url = "https://www.foo.com"
connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS)
return connection
class OpenStackIdentity_2_0_ConnectionTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_2_0_MockHttp
mock_cls.type = None
OpenStackIdentity_2_0_Connection.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_2_0_Connection(auth_url='http://none',
user_id='test',
key='test',
tenant_name='test')
self.auth_instance.auth_token = 'mock'
def test_list_projects(self):
result = self.auth_instance.list_projects()
self.assertEqual(len(result), 2)
self.assertEqual(result[0].id, 'a')
self.assertEqual(result[0].name, 'test')
self.assertEqual(result[0].description, 'test project')
self.assertTrue(result[0].enabled)
class OpenStackIdentity_3_0_ConnectionTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_3_0_MockHttp
mock_cls.type = None
OpenStackIdentity_3_0_Connection.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
tenant_name='test')
self.auth_instance.auth_token = 'mock'
def test_token_scope_argument(self):
# Invalid token_scope value
expected_msg = 'Invalid value for "token_scope" argument: foo'
self.assertRaisesRegexp(ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='foo')
# Missing tenant_name
expected_msg = 'Must provide tenant_name and domain_name argument'
self.assertRaisesRegexp(ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='project')
# Missing domain_name
expected_msg = 'Must provide domain_name argument'
self.assertRaisesRegexp(ValueError, expected_msg,
OpenStackIdentity_3_0_Connection,
auth_url='http://none',
user_id='test',
key='test',
token_scope='domain',
domain_name=None)
# Scope to project all ok
OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
token_scope='project',
tenant_name='test',
domain_name='Default')
# Scope to domain
OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test',
key='test',
token_scope='domain',
tenant_name=None,
domain_name='Default')
def test_authenticate(self):
auth = OpenStackIdentity_3_0_Connection(auth_url='http://none',
user_id='test_user_id',
key='test_key',
token_scope='project',
tenant_name="test_tenant",
domain_name='test_domain')
auth.authenticate()
def test_list_supported_versions(self):
OpenStackIdentity_3_0_MockHttp.type = 'v3'
versions = self.auth_instance.list_supported_versions()
self.assertEqual(len(versions), 2)
self.assertEqual(versions[0].version, 'v2.0')
self.assertEqual(versions[0].url,
'http://192.168.18.100:5000/v2.0/')
self.assertEqual(versions[1].version, 'v3.0')
self.assertEqual(versions[1].url,
'http://192.168.18.100:5000/v3/')
def test_list_domains(self):
domains = self.auth_instance.list_domains()
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0].id, 'default')
self.assertEqual(domains[0].name, 'Default')
self.assertTrue(domains[0].enabled)
def test_list_projects(self):
projects = self.auth_instance.list_projects()
self.assertEqual(len(projects), 4)
self.assertEqual(projects[0].id, 'a')
self.assertEqual(projects[0].domain_id, 'default')
self.assertTrue(projects[0].enabled)
self.assertEqual(projects[0].description, 'Test project')
def test_list_users(self):
users = self.auth_instance.list_users()
self.assertEqual(len(users), 12)
self.assertEqual(users[0].id, 'a')
self.assertEqual(users[0].domain_id, 'default')
self.assertEqual(users[0].enabled, True)
self.assertEqual(users[0].email, 'openstack-test@localhost')
def test_list_roles(self):
roles = self.auth_instance.list_roles()
self.assertEqual(len(roles), 2)
self.assertEqual(roles[1].id, 'b')
self.assertEqual(roles[1].name, 'admin')
def test_list_user_projects(self):
user = self.auth_instance.list_users()[0]
projects = self.auth_instance.list_user_projects(user=user)
self.assertEqual(len(projects), 0)
def test_list_user_domain_roles(self):
user = self.auth_instance.list_users()[0]
domain = self.auth_instance.list_domains()[0]
roles = self.auth_instance.list_user_domain_roles(domain=domain,
user=user)
self.assertEqual(len(roles), 1)
self.assertEqual(roles[0].name, 'admin')
def test_get_domain(self):
domain = self.auth_instance.get_domain(domain_id='default')
self.assertEqual(domain.name, 'Default')
def test_create_user(self):
user = self.auth_instance.create_user(email='test2@localhost', password='test1',
name='test2', domain_id='default')
self.assertEqual(user.id, 'c')
self.assertEqual(user.name, 'test2')
def test_enable_user(self):
user = self.auth_instance.list_users()[0]
result = self.auth_instance.enable_user(user=user)
self.assertTrue(isinstance(result, OpenStackIdentityUser))
def test_disable_user(self):
user = self.auth_instance.list_users()[0]
result = self.auth_instance.disable_user(user=user)
self.assertTrue(isinstance(result, OpenStackIdentityUser))
def test_grant_domain_role_to_user(self):
domain = self.auth_instance.list_domains()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.grant_domain_role_to_user(domain=domain,
role=role,
user=user)
self.assertTrue(result)
def test_revoke_domain_role_from_user(self):
domain = self.auth_instance.list_domains()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.revoke_domain_role_from_user(domain=domain,
role=role,
user=user)
self.assertTrue(result)
def test_grant_project_role_to_user(self):
project = self.auth_instance.list_projects()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.grant_project_role_to_user(project=project,
role=role,
user=user)
self.assertTrue(result)
def test_revoke_project_role_from_user(self):
project = self.auth_instance.list_projects()[0]
role = self.auth_instance.list_roles()[0]
user = self.auth_instance.list_users()[0]
result = self.auth_instance.revoke_project_role_from_user(project=project,
role=role,
user=user)
self.assertTrue(result)
class OpenStackIdentity_3_0_Connection_OIDC_access_tokenTests(
unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_3_0_MockHttp
mock_cls.type = None
OpenStackIdentity_3_0_Connection_OIDC_access_token.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none',
user_id='idp',
key='token',
tenant_name='oidc',
domain_name='test_domain')
self.auth_instance.auth_token = 'mock'
def test_authenticate(self):
auth = OpenStackIdentity_3_0_Connection_OIDC_access_token(auth_url='http://none',
user_id='idp',
key='token',
token_scope='project',
tenant_name="oidc",
domain_name='test_domain')
auth.authenticate()
class OpenStackIdentity_2_0_Connection_VOMSTests(unittest.TestCase):
def setUp(self):
mock_cls = OpenStackIdentity_2_0_Connection_VOMSMockHttp
mock_cls.type = None
OpenStackIdentity_2_0_Connection_VOMS.conn_class = mock_cls
self.auth_instance = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none',
user_id=None,
key='/tmp/proxy.pem',
tenant_name='VO')
self.auth_instance.auth_token = 'mock'
def test_authenticate(self):
auth = OpenStackIdentity_2_0_Connection_VOMS(auth_url='http://none',
user_id=None,
key='/tmp/proxy.pem',
token_scope='test',
tenant_name="VO")
auth.authenticate()
class OpenStackServiceCatalogTestCase(unittest.TestCase):
fixtures = ComputeFileFixtures('openstack')
def test_parsing_auth_v1_1(self):
data = self.fixtures.load('_v1_1__auth.json')
data = json.loads(data)
service_catalog = data['auth']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='1.0')
entries = catalog.get_entries()
self.assertEqual(len(entries), 3)
entry = [e for e in entries if e.service_type == 'cloudFilesCDN'][0]
self.assertEqual(entry.service_type, 'cloudFilesCDN')
self.assertEqual(entry.service_name, None)
self.assertEqual(len(entry.endpoints), 2)
self.assertEqual(entry.endpoints[0].region, 'ORD')
self.assertEqual(entry.endpoints[0].url,
'https://cdn2.clouddrive.com/v1/MossoCloudFS')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
self.assertEqual(entry.endpoints[1].region, 'LON')
self.assertEqual(entry.endpoints[1].endpoint_type, 'external')
def test_parsing_auth_v2(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
entries = catalog.get_entries()
self.assertEqual(len(entries), 6)
entry = [e for e in entries if e.service_name == 'cloudServers'][0]
self.assertEqual(entry.service_type, 'compute')
self.assertEqual(entry.service_name, 'cloudServers')
self.assertEqual(len(entry.endpoints), 1)
self.assertEqual(entry.endpoints[0].region, None)
self.assertEqual(entry.endpoints[0].url,
'https://servers.api.rackspacecloud.com/v1.0/1337')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
def test_parsing_auth_v3(self):
data = self.fixtures.load('_v3__auth.json')
data = json.loads(data)
service_catalog = data['token']['catalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='3.x')
entries = catalog.get_entries()
self.assertEqual(len(entries), 6)
entry = [e for e in entries if e.service_type == 'volume'][0]
self.assertEqual(entry.service_type, 'volume')
self.assertEqual(entry.service_name, None)
self.assertEqual(len(entry.endpoints), 3)
self.assertEqual(entry.endpoints[0].region, 'regionOne')
self.assertEqual(entry.endpoints[0].endpoint_type, 'external')
self.assertEqual(entry.endpoints[1].region, 'regionOne')
self.assertEqual(entry.endpoints[1].endpoint_type, 'admin')
self.assertEqual(entry.endpoints[2].region, 'regionOne')
self.assertEqual(entry.endpoints[2].endpoint_type, 'internal')
def test_get_public_urls(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
public_urls = catalog.get_public_urls(service_type='object-store')
expected_urls = ['https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111',
'https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111']
self.assertEqual(public_urls, expected_urls)
def test_get_regions(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
regions = catalog.get_regions(service_type='object-store')
self.assertEqual(regions, ['LON', 'ORD'])
regions = catalog.get_regions(service_type='invalid')
self.assertEqual(regions, [])
def test_get_service_types(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
service_types = catalog.get_service_types()
self.assertEqual(service_types, ['compute', 'object-store',
'rax:object-cdn'])
service_types = catalog.get_service_types(region='ORD')
self.assertEqual(service_types, ['rax:object-cdn'])
def test_get_service_names(self):
data = self.fixtures.load('_v2_0__auth.json')
data = json.loads(data)
service_catalog = data['access']['serviceCatalog']
catalog = OpenStackServiceCatalog(service_catalog=service_catalog,
auth_version='2.0')
service_names = catalog.get_service_names()
self.assertEqual(service_names, ['cloudFiles', 'cloudFilesCDN',
'cloudServers',
'cloudServersOpenStack',
'cloudServersPreprod',
'nova'])
service_names = catalog.get_service_names(service_type='compute')
self.assertEqual(service_names, ['cloudServers',
'cloudServersOpenStack',
'cloudServersPreprod',
'nova'])
class OpenStackIdentity_2_0_MockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v2')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v2_0_tenants(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v2_0_tenants.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
class OpenStackIdentity_3_0_MockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v3')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v3(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_versions.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_domains.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_projects(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_projects.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_auth_tokens(self, method, url, body, headers):
if method == 'POST':
status = httplib.OK
data = json.loads(body)
if 'password' in data['auth']['identity']:
if data['auth']['identity']['password']['user']['domain']['name'] != 'test_domain' or \
data['auth']['scope']['project']['domain']['name'] != 'test_domain':
status = httplib.UNAUTHORIZED
body = ComputeFileFixtures('openstack').load('_v3__auth.json')
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (status, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_users(self, method, url, body, headers):
if method == 'GET':
# list users
body = self.fixtures.load('v3_users.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
elif method == 'POST':
# create user
body = self.fixtures.load('v3_create_user.json')
return (httplib.CREATED, body, self.json_content_headers,
httplib.responses[httplib.CREATED])
raise NotImplementedError()
def _v3_users_a(self, method, url, body, headers):
if method == 'PATCH':
# enable / disable user
body = self.fixtures.load('v3_users_a.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_roles(self, method, url, body, headers):
if method == 'GET':
body = self.fixtures.load('v3_roles.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains_default_users_a_roles_a(self, method, url, body, headers):
if method == 'PUT':
# grant domain role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'DELETE':
# revoke domain role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError()
def _v3_projects_a_users_a_roles_a(self, method, url, body, headers):
if method == 'PUT':
# grant project role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
elif method == 'DELETE':
# revoke project role
body = ''
return (httplib.NO_CONTENT, body, self.json_content_headers,
httplib.responses[httplib.NO_CONTENT])
raise NotImplementedError()
def _v3_domains_default(self, method, url, body, headers):
if method == 'GET':
# get domain
body = self.fixtures.load('v3_domains_default.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_users_a_projects(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = self.fixtures.load('v3_users_a_projects.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_domains_default_users_a_roles(self, method, url, body, headers):
if method == 'GET':
# get user domain roles
body = self.fixtures.load('v3_domains_default_users_a_roles.json')
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_OS_FEDERATION_identity_providers_idp_protocols_oidc_auth(self, method, url, body, headers):
if method == 'GET':
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v3_OS_FEDERATION_projects(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = json.dumps({"projects": [{"id": "project_id"}]})
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
class OpenStackIdentity_2_0_Connection_VOMSMockHttp(MockHttp):
fixtures = ComputeFileFixtures('openstack_identity/v2')
json_content_headers = {'content-type': 'application/json; charset=UTF-8'}
def _v2_0_tokens(self, method, url, body, headers):
if method == 'POST':
status = httplib.UNAUTHORIZED
data = json.loads(body)
if 'voms' in data['auth'] and data['auth']['voms'] is True:
status = httplib.OK
body = ComputeFileFixtures('openstack').load('_v2_0__auth.json')
headers = self.json_content_headers.copy()
headers['x-subject-token'] = '00000000000000000000000000000000'
return (status, body, headers, httplib.responses[httplib.OK])
raise NotImplementedError()
def _v2_0_tenants(self, method, url, body, headers):
if method == 'GET':
# get user projects
body = json.dumps({"tenant": [{"name": "tenant_name"}]})
return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK])
raise NotImplementedError()
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 | -7,635,700,592,050,106,000 | 42.763852 | 117 | 0.562476 | false |
mozilla/relman-auto-nag | auto_nag/round_robin.py | 2 | 6925 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
from random import randint
from dateutil.relativedelta import relativedelta
from libmozdata import utils as lmdutils
from libmozdata.bugzilla import BugzillaUser
from auto_nag import logger, utils
from auto_nag.people import People
from auto_nag.round_robin_calendar import Calendar
class RoundRobin(object):
_instances = {}
def __init__(self, rr=None, people=None, teams=None):
self.people = People.get_instance() if people is None else people
self.components_by_triager = {}
self.all_calendars = []
self.feed(teams, rr=rr)
self.nicks = {}
self.erroneous_bzmail = {}
utils.init_random()
@staticmethod
def get_instance(teams=None):
if teams is None:
if None not in RoundRobin._instances:
RoundRobin._instances[None] = RoundRobin()
return RoundRobin._instances[None]
teams = tuple(teams)
if teams not in RoundRobin._instances:
RoundRobin._instances[teams] = RoundRobin(teams=teams)
return RoundRobin._instances[teams]
def get_calendar(self, team, data):
fallback = data["fallback"]
strategies = set(data["components"].values())
res = {}
for strategy in strategies:
url = data[strategy]["calendar"]
res[strategy] = Calendar.get(url, fallback, team, people=self.people)
return res
def feed(self, teams, rr=None):
self.data = {}
filenames = {}
if rr is None:
rr = {}
for team, path in utils.get_config(
"round-robin", "teams", default={}
).items():
if teams is not None and team not in teams:
continue
with open("./auto_nag/scripts/configs/{}".format(path), "r") as In:
rr[team] = json.load(In)
filenames[team] = path
        # rr is a dictionary:
        # - doc -> documentation
        # - components -> dictionary: Product::Component -> strategy name
        # - <strategy name> -> dictionary: {"calendar": url}
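        # Hypothetical team file sketch (names and URL invented for
        # illustration, inferred from the parsing below):
        #
        #     {
        #         "fallback": "fallback-person",
        #         "components": {"Core::Graphics": "default"},
        #         "default": {"calendar": "https://example.com/triage.ics"}
        #     }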
# Get all the strategies for each team
for team, data in rr.items():
calendars = self.get_calendar(team, data)
self.all_calendars += list(calendars.values())
        # finally self.data is a dictionary:
        # - Product::Component -> Calendar (the calendar knows its fallback,
        #   i.e. who to nag when nobody is on the calendar)
for pc, strategy in data["components"].items():
self.data[pc] = calendars[strategy]
def get_components(self):
return list(self.data.keys())
def get_components_for_triager(self, triager):
return self.components_by_triager[triager]
def add_component_for_triager(self, component, triagers):
if not isinstance(triagers, list):
triagers = [triagers]
for triager in triagers:
if triager in self.components_by_triager:
self.components_by_triager[triager].add(component)
else:
self.components_by_triager[triager] = {component}
def get_fallback(self, bug):
pc = bug["product"] + "::" + bug["component"]
if pc not in self.data:
mail = bug.get("triage_owner")
else:
cal = self.data[pc]
mail = cal.get_fallback_bzmail()
return self.people.get_moz_mail(mail)
def get_erroneous_bzmail(self):
return self.erroneous_bzmail
def add_erroneous_bzmail(self, bzmail, prod_comp, cal):
logger.error(f"No nick for {bzmail} for {prod_comp}")
fb = cal.get_fallback_mozmail()
if fb not in self.erroneous_bzmail:
self.erroneous_bzmail[fb] = {bzmail}
else:
self.erroneous_bzmail[fb].add(bzmail)
def get_nick(self, bzmail, prod_comp, cal):
if bzmail not in self.nicks:
def handler(user):
self.nicks[bzmail] = user["nick"]
BugzillaUser(user_names=[bzmail], user_handler=handler).wait()
if bzmail not in self.nicks:
self.add_erroneous_bzmail(bzmail, prod_comp, cal)
return None
return self.nicks[bzmail]
def get(self, bug, date, only_one=True, has_nick=True):
pc = bug["product"] + "::" + bug["component"]
if pc not in self.data:
mail = bug.get("triage_owner")
nick = bug.get("triage_owner_detail", {}).get("nick")
if utils.is_no_assignee(mail):
mail, nick = None, None
if mail is None:
logger.error("No triage owner for {}".format(pc))
self.add_component_for_triager(pc, mail)
if has_nick:
                return (mail, nick) if only_one else [(mail, nick)]
return mail if only_one else [mail]
cal = self.data[pc]
persons = cal.get_persons(date)
fb = cal.get_fallback_bzmail()
if not persons or all(p is None for _, p in persons):
# the fallback is the triage owner
self.add_component_for_triager(pc, [fb])
return (fb, self.get_nick(fb, pc, cal)) if has_nick else fb
bzmails = []
for _, p in persons:
bzmails.append(fb if p is None else p)
self.add_component_for_triager(pc, bzmails)
if only_one:
bzmail = bzmails[randint(0, len(bzmails) - 1)]
if has_nick:
nick = self.get_nick(bzmail, pc, cal)
return bzmail, nick
return bzmail
if has_nick:
return [(bzmail, self.get_nick(bzmail, pc, cal)) for bzmail in bzmails]
return bzmails
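    # Illustrative call shapes for get() (bug/date values assumed, addresses
    # invented):
    #   get(bug, date)                 -> ('triager@example.com', 'nick')
    #   get(bug, date, has_nick=False) -> 'triager@example.com'
    #   get(bug, date, only_one=False) -> [('a@example.com', 'nick_a'), ...]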
def get_who_to_nag(self, date):
fallbacks = {}
date = lmdutils.get_date_ymd(date)
days = utils.get_config("round-robin", "days_to_nag", 7)
next_date = date + relativedelta(days=days)
for cal in self.all_calendars:
persons = cal.get_persons(next_date)
if persons and all(p is not None for _, p in persons):
continue
name = cal.get_team_name()
fb = cal.get_fallback_mozmail()
if fb not in fallbacks:
fallbacks[fb] = {}
if name not in fallbacks[fb]:
fallbacks[fb][name] = {"nobody": False, "persons": []}
info = fallbacks[fb][name]
if not persons:
info["nobody"] = True
else:
people_names = [n for n, p in persons if p is None]
if people_names:
info["persons"] += people_names
return fallbacks
| bsd-3-clause | 6,921,480,445,484,708,000 | 34.152284 | 88 | 0.563177 | false |
mszewczy/odoo | addons/event/wizard/__init__.py | 435 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import event_confirm
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -8,410,107,059,147,258,000 | 43.458333 | 78 | 0.615745 | false |
overtherain/scriptfile | software/googleAppEngine/lib/jinja2/jinja2/nodes.py | 122 | 28750 | # -*- coding: utf-8 -*-
"""
jinja2.nodes
~~~~~~~~~~~~
This module implements additional nodes derived from the ast base node.
It also provides some node tree helper functions like `in_lineno` and
`get_nodes` used by the parser and translator in order to normalize
python and jinja nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import operator
from itertools import chain, izip
from collections import deque
from jinja2.utils import Markup, MethodType, FunctionType
#: the types we support for context functions
_context_function_types = (FunctionType, MethodType)
_binop_to_func = {
'*': operator.mul,
'/': operator.truediv,
'//': operator.floordiv,
'**': operator.pow,
'%': operator.mod,
'+': operator.add,
'-': operator.sub
}
_uaop_to_func = {
'not': operator.not_,
'+': operator.pos,
'-': operator.neg
}
_cmpop_to_func = {
'eq': operator.eq,
'ne': operator.ne,
'gt': operator.gt,
'gteq': operator.ge,
'lt': operator.lt,
'lteq': operator.le,
'in': lambda a, b: a in b,
'notin': lambda a, b: a not in b
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(cls, name, bases, d):
for attr in 'fields', 'attributes':
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, 'multiple inheritance not allowed'
assert len(storage) == len(set(storage)), 'layout conflict'
d[attr] = tuple(storage)
d.setdefault('abstract', False)
return type.__new__(cls, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
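# Illustrative save/revert pattern (not part of the original module):
#
#     old = eval_ctx.save()
#     eval_ctx.autoescape = True
#     # ... generate some output with autoescaping forced on ...
#     eval_ctx.revert(old)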
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError('if no eval context is passed, the '
'node must have an attached '
'environment.')
return EvalContext(node.environment)
return ctx
class Node(object):
"""Baseclass for all Jinja2 nodes. There are a number of nodes available
of different types. There are three major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
__metaclass__ = NodeType
fields = ()
attributes = ('lineno', 'environment')
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
            raise TypeError('abstract nodes are not instantiable')
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError('%r takes 0 arguments' %
self.__class__.__name__)
raise TypeError('%r takes 0 or %d argument%s' % (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and 's' or ''
))
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError('unknown attribute %r' %
iter(attributes).next())
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (exclude is only is None) or \
(exclude is not None and name not in exclude) or \
(only is not None and name in only):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for field, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
        parser will generate all nodes with a 'load' context as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if 'ctx' in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if 'lineno' in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and \
tuple(self.iter_fields()) == tuple(other.iter_fields())
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
arg in self.fields)
)
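# Minimal construction sketch (illustrative only): fields are passed
# positionally and attributes as keywords, mirroring Node.__init__ above.
#
#     node = Name('user', 'load', lineno=1)
#     assert list(node.iter_fields()) == [('name', 'user'), ('ctx', 'load')]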
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ('body',)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ('nodes',)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ('template',)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ('test', 'body', 'else_')
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ('name', 'args', 'defaults', 'body')
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ('call', 'args', 'defaults', 'body')
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ('body', 'filter')
class Block(Stmt):
"""A node that represents a block."""
fields = ('name', 'body', 'scoped')
class Include(Stmt):
"""A node that represents the include tag."""
fields = ('template', 'with_context', 'ignore_missing')
class Import(Stmt):
"""A node that represents the import tag."""
fields = ('template', 'target', 'with_context')
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ('template', 'names', 'with_context')
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ('node',)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ('target', 'node')
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ('left', 'right')
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_binops:
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ('node',)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if self.environment.sandboxed and \
self.operator in self.environment.intercepted_unops:
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ('name', 'ctx')
def can_assign(self):
return self.name not in ('true', 'false', 'none',
'True', 'False', 'None')
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too.  Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) can be
    stored this way.
"""
fields = ('value',)
def as_const(self, eval_ctx=None):
return self.value
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
class TemplateData(Literal):
"""A constant template string."""
fields = ('data',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ('items', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ('items',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ('key', 'value')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
# call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, 'contextfilter', False):
raise Impossible()
obj = self.node.as_const(eval_ctx)
args = [x.as_const(eval_ctx) for x in self.args]
if getattr(filter_, 'evalcontextfilter', False):
args.insert(0, eval_ctx)
elif getattr(filter_, 'environmentfilter', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return filter_(obj, *args, **kwargs)
except Exception:
raise Impossible()
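# Illustrative sketch, not part of the original module: folding a filter call
# at compile time.  ``env`` is assumed to be a default jinja2 Environment whose
# ``upper`` filter is a plain (non-context) filter.
#
#     node = Filter(Const('hello', environment=env), 'upper',
#                   [], [], None, None, environment=env)
#     node.as_const()    # -> 'HELLO'
#
# A contextfilter, or a volatile eval context, makes folding impossible and
# as_const() raises Impossible instead.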
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
obj = self.node.as_const(eval_ctx)
# don't evaluate context functions
args = [x.as_const(eval_ctx) for x in self.args]
if isinstance(obj, _context_function_types):
if getattr(obj, 'contextfunction', False):
raise Impossible()
elif getattr(obj, 'evalcontextfunction', False):
args.insert(0, eval_ctx)
elif getattr(obj, 'environmentfunction', False):
args.insert(0, self.environment)
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs)
if self.dyn_args is not None:
try:
args.extend(self.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if self.dyn_kwargs is not None:
try:
kwargs.update(self.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
try:
return obj(*args, **kwargs)
except Exception:
raise Impossible()
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ('node', 'arg', 'ctx')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != 'load':
raise Impossible()
try:
return self.environment.getitem(self.node.as_const(eval_ctx),
self.arg.as_const(eval_ctx))
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ('node', 'attr', 'ctx')
def as_const(self, eval_ctx=None):
if self.ctx != 'load':
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx),
self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ('start', 'stop', 'step')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ('nodes',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return ''.join(unicode(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\s.
"""
fields = ('expr', 'ops')
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ('op', 'expr')
if __debug__:
Operand.__doc__ += '\nThe following operators are available: ' + \
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
set(_uaop_to_func) | set(_cmpop_to_func)))
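# For illustration (not in the original module): a chained comparison such as
# ``1 < x < 10`` is represented as
#
#     Compare(expr=<1>, ops=[Operand('lt', <x>), Operand('lt', <10>)])
#
# i.e. the left-most operand goes into ``expr`` and every further comparison
# becomes one Operand in ``ops``.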
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = '*'
class Div(BinExpr):
"""Divides the left by the right node."""
operator = '/'
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = '//'
class Add(BinExpr):
"""Add the left to the right node."""
operator = '+'
class Sub(BinExpr):
"""Substract the right from the left node."""
operator = '-'
class Mod(BinExpr):
"""Left modulo right."""
operator = '%'
class Pow(BinExpr):
"""Left to the power of right."""
operator = '**'
class And(BinExpr):
"""Short circuited AND."""
operator = 'and'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = 'or'
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = 'not'
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = '-'
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = '+'
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ('name',)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ('identifier', 'name')
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ('importname',)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
template and is not treated specially by the compiler.
"""
fields = ('name',)
def __init__(self):
raise TypeError('Can\'t create internal names. Use the '
'`free_identifier` method on a parser.')
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ('expr',)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ('body',)
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ('options',)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ('body',)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError('can\'t create custom node types')
NodeType.__new__ = staticmethod(_failing_new); del _failing_new
| mit | 6,947,535,176,784,818,000 | 30.593407 | 81 | 0.600243 | false |
xianggong/m2c_unit_test | test/operator/remainder_char8char8/compile.py | 1861 | 4430 | #!/usr/bin/python
import os
import subprocess
import re
def runCommand(command):
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.wait()
return iter(p.stdout.readline, b'')
def dumpRunCommand(command, dump_file_name, postfix):
dumpFile = open(dump_file_name + postfix, "w+")
dumpFile.write(command + "\n")
for line in runCommand(command.split()):
dumpFile.write(line)
def rmFile(file_name):
cmd = "rm -rf " + file_name
runCommand(cmd.split())
def rnm_ir(file_name):
# Prefix all unnamed variables with 'tmp_'
ir_file_name = file_name + ".ll"
if os.path.isfile(ir_file_name):
fo = open(ir_file_name, "r+")  # read, then rewrite in place
lines = fo.readlines()
fo.seek(0)
fo.truncate()
for line in lines:
# Add entry block identifier
if "define" in line:
line += "entry:\n"
# Rename all unnamed variables
line = re.sub('\%([0-9]+)',
r'%tmp_\1',
line.rstrip())
# Also rename branch name
line = re.sub('(\;\ \<label\>\:)([0-9]+)',
r'tmp_\2:',
line.rstrip())
fo.write(line + '\n')
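# Illustrative example (hypothetical IR lines) of the substitutions performed
# by rnm_ir() above:
#
#     '%42 = add i32 %1, %2'  ->  '%tmp_42 = add i32 %tmp_1, %tmp_2'
#     '; <label>:7'           ->  'tmp_7:'
#
# i.e. unnamed SSA values get a 'tmp_' prefix and anonymous basic-block labels
# become explicit named labels.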
def gen_ir(file_name):
# Directories
root_dir = '../../../'
header_dir = root_dir + "inc/"
# Headers
header = " -I " + header_dir
header += " -include " + header_dir + "m2c_buildin_fix.h "
header += " -include " + header_dir + "clc/clc.h "
header += " -D cl_clang_storage_class_specifiers "
gen_ir = "clang -S -emit-llvm -O0 -target r600-- -mcpu=verde "
cmd_gen_ir = gen_ir + header + file_name + ".cl"
dumpRunCommand(cmd_gen_ir, file_name, ".clang.log")
def asm_ir(file_name):
if os.path.isfile(file_name + ".ll"):
# Command to assemble IR to bitcode
gen_bc = "llvm-as "
gen_bc_src = file_name + ".ll"
gen_bc_dst = file_name + ".bc"
cmd_gen_bc = gen_bc + gen_bc_src + " -o " + gen_bc_dst
runCommand(cmd_gen_bc.split())
def opt_bc(file_name):
if os.path.isfile(file_name + ".bc"):
# Command to optmize bitcode
opt_bc = "opt --mem2reg "
opt_ir_src = file_name + ".bc"
opt_ir_dst = file_name + ".opt.bc"
cmd_opt_bc = opt_bc + opt_ir_src + " -o " + opt_ir_dst
runCommand(cmd_opt_bc.split())
def dis_bc(file_name):
if os.path.isfile(file_name + ".bc"):
# Command to disassemble bitcode
dis_bc = "llvm-dis "
dis_ir_src = file_name + ".opt.bc"
dis_ir_dst = file_name + ".opt.ll"
cmd_dis_bc = dis_bc + dis_ir_src + " -o " + dis_ir_dst
runCommand(cmd_dis_bc.split())
def m2c_gen(file_name):
if os.path.isfile(file_name + ".opt.bc"):
# Command to disassemble bitcode
m2c_gen = "m2c --llvm2si "
m2c_gen_src = file_name + ".opt.bc"
cmd_m2c_gen = m2c_gen + m2c_gen_src
dumpRunCommand(cmd_m2c_gen, file_name, ".m2c.llvm2si.log")
# Remove file if size is 0
if os.path.isfile(file_name + ".opt.s"):
if os.path.getsize(file_name + ".opt.s") == 0:
rmFile(file_name + ".opt.s")
def m2c_bin(file_name):
if os.path.isfile(file_name + ".opt.s"):
# Command to disassemble bitcode
m2c_bin = "m2c --si2bin "
m2c_bin_src = file_name + ".opt.s"
cmd_m2c_bin = m2c_bin + m2c_bin_src
dumpRunCommand(cmd_m2c_bin, file_name, ".m2c.si2bin.log")
def main():
# Commands
for file in os.listdir("./"):
if file.endswith(".cl"):
file_name = os.path.splitext(file)[0]
# Execute commands
gen_ir(file_name)
rnm_ir(file_name)
asm_ir(file_name)
opt_bc(file_name)
dis_bc(file_name)
m2c_gen(file_name)
m2c_bin(file_name)
if __name__ == "__main__":
main()
| gpl-2.0 | -1,353,948,927,176,512,500 | 31.335766 | 70 | 0.476749 | false |
chenss/ChatRoom | 14.5 已经能运行(虽然有很多Warning)的Django-nonrel框架/django/core/files/uploadhandler.py | 136 | 7193 | """
Base file upload handler classes, and the built-in concrete subclasses
"""
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.uploadedfile import TemporaryUploadedFile, InMemoryUploadedFile
from django.utils import importlib
__all__ = ['UploadFileException','StopUpload', 'SkipFile', 'FileUploadHandler',
'TemporaryFileUploadHandler', 'MemoryFileUploadHandler',
'load_handler', 'StopFutureHandlers']
class UploadFileException(Exception):
"""
Any error having to do with uploading files.
"""
pass
class StopUpload(UploadFileException):
"""
This exception is raised when an upload must abort.
"""
def __init__(self, connection_reset=False):
"""
If ``connection_reset`` is ``True``, Django will halt the upload
without consuming the rest of the upload. This will cause the browser to
show a "connection reset" error.
"""
self.connection_reset = connection_reset
def __unicode__(self):
if self.connection_reset:
return u'StopUpload: Halt current upload.'
else:
return u'StopUpload: Consume request data, then halt.'
class SkipFile(UploadFileException):
"""
This exception is raised by an upload handler that wants to skip a given file.
"""
pass
class StopFutureHandlers(UploadFileException):
"""
Upload handlers that have handled a file and do not want future handlers to
run should raise this exception instead of returning None.
"""
pass
class FileUploadHandler(object):
"""
Base class for streaming upload handlers.
"""
chunk_size = 64 * 2 ** 10 #: The default chunk size is 64 KB.
def __init__(self, request=None):
self.file_name = None
self.content_type = None
self.content_length = None
self.charset = None
self.request = request
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Handle the raw input from the client.
Parameters:
:input_data:
An object that supports reading via .read().
:META:
``request.META``.
:content_length:
The (integer) value of the Content-Length header from the
client.
:boundary: The boundary from the Content-Type header. Be sure to
prepend two '--'.
"""
pass
def new_file(self, field_name, file_name, content_type, content_length,
charset=None, content_type_extra=None):
"""
Signal that a new file has been started.
Warning: As with any data from the client, you should not trust
content_length (and sometimes won't even get it).
"""
self.field_name = field_name
self.file_name = file_name
self.content_type = content_type
self.content_length = content_length
self.charset = charset
if content_type_extra is None:
content_type_extra = {}
self.content_type_extra = content_type_extra
def receive_data_chunk(self, raw_data, start):
"""
Receive data from the streamed upload parser. ``start`` is the position
in the file of the chunk.
"""
raise NotImplementedError()
def file_complete(self, file_size):
"""
Signal that a file has completed. File size corresponds to the actual
size accumulated by all the chunks.
Subclasses should return a valid ``UploadedFile`` object.
"""
raise NotImplementedError()
def upload_complete(self):
"""
Signal that the upload is complete. Subclasses should perform cleanup
that is necessary for this handler.
"""
pass
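# A minimal sketch (not part of Django) of a custom handler built on the base
# class API documented above.  It only counts bytes and passes each chunk on to
# later handlers by returning it unchanged:
#
#     class ByteCountingUploadHandler(FileUploadHandler):
#         def new_file(self, *args, **kwargs):
#             super(ByteCountingUploadHandler, self).new_file(*args, **kwargs)
#             self.total = 0
#
#         def receive_data_chunk(self, raw_data, start):
#             self.total += len(raw_data)
#             return raw_data   # hand the chunk to the next handler in the chain
#
#         def file_complete(self, file_size):
#             return None       # let a later handler produce the UploadedFile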
class TemporaryFileUploadHandler(FileUploadHandler):
"""
Upload handler that streams data into a temporary file.
"""
def __init__(self, *args, **kwargs):
super(TemporaryFileUploadHandler, self).__init__(*args, **kwargs)
def new_file(self, file_name, *args, **kwargs):
"""
Create the file object to append to as data is coming in.
"""
super(TemporaryFileUploadHandler, self).new_file(file_name, *args, **kwargs)
self.file = TemporaryUploadedFile(self.file_name, self.content_type, 0, self.charset)
def receive_data_chunk(self, raw_data, start):
self.file.write(raw_data)
def file_complete(self, file_size):
self.file.seek(0)
self.file.size = file_size
return self.file
class MemoryFileUploadHandler(FileUploadHandler):
"""
File upload handler to stream uploads into memory (used for small files).
"""
def handle_raw_input(self, input_data, META, content_length, boundary, encoding=None):
"""
Use the content_length to signal whether or not this handler should be in use.
"""
# Check the content-length header to see if this handler should be used.
# If the post is too large, we cannot use the Memory handler.
if content_length > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
self.activated = False
else:
self.activated = True
def new_file(self, *args, **kwargs):
super(MemoryFileUploadHandler, self).new_file(*args, **kwargs)
if self.activated:
self.file = StringIO()
raise StopFutureHandlers()
def receive_data_chunk(self, raw_data, start):
"""
Add the data to the StringIO file.
"""
if self.activated:
self.file.write(raw_data)
else:
return raw_data
def file_complete(self, file_size):
"""
Return a file object if we're activated.
"""
if not self.activated:
return
self.file.seek(0)
return InMemoryUploadedFile(
file = self.file,
field_name = self.field_name,
name = self.file_name,
content_type = self.content_type,
size = file_size,
charset = self.charset
)
def load_handler(path, *args, **kwargs):
"""
Given a path to a handler, return an instance of that handler.
E.g.::
>>> load_handler('django.core.files.uploadhandler.TemporaryFileUploadHandler', request)
<TemporaryFileUploadHandler object at 0x...>
"""
i = path.rfind('.')
module, attr = path[:i], path[i+1:]
try:
mod = importlib.import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing upload handler module %s: "%s"' % (module, e))
except ValueError, e:
raise ImproperlyConfigured('Error importing upload handler module. Is FILE_UPLOAD_HANDLERS a correctly defined list or tuple?')
try:
cls = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" upload handler backend' % (module, attr))
return cls(*args, **kwargs)
| gpl-2.0 | 3,383,330,258,041,220,600 | 31.844749 | 135 | 0.62255 | false |
SUNY-Albany-CCI/INF_202_Survey | languages/fr.py | 13 | 7668 | # coding: utf8
{
'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%s %%{row} deleted': '%s rangées supprimées',
'%s %%{row} updated': '%s rangées mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'Administrative Interface': 'Administrative Interface',
'Administrative interface': "Interface d'administration",
'Ajax Recipes': 'Recettes Ajax',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Contrôleur',
'Copyright': 'Copyright',
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s select',
'db': 'db',
'DB Model': 'Modèle DB',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'E-mail',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Email and SMS',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'FAQ',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Group ID': 'Groupe ID',
'Groups': 'Groups',
'Hello World': 'Bonjour le monde',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Importer/Exporter',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Internal State': 'État interne',
'Introduction': 'Introduction',
'Invalid email': 'E-mail invalide',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Key': 'Key',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live chat': 'Chat live',
'Live Chat': 'Live Chat',
'login': 'connectez-vous',
'Login': 'Connectez-vous',
'logout': 'déconnectez-vous',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'Lost password?': 'Lost password?',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menu modèle',
'My Sites': 'My Sites',
'Name': 'Nom',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'Object or table name': 'Object or table name',
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'Password': 'Mot de passe',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous 100 rows': '100 lignes précédentes',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Examples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID d'enregistrement",
'Record id': "id d'enregistrement",
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration identifier': 'Registration identifier',
'Registration key': "Clé d'enregistrement",
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Semantic': 'Sémantique',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'état',
'Statistics': 'Statistics',
'Stylesheet': 'Feuille de style',
'submit': 'submit',
'Submit': 'Soumettre',
'Support': 'Support',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "query" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Twitter': 'Twitter',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT pour construire des requêtes plus complexes.',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User ID': 'ID utilisateur',
'User Voice': 'User Voice',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenu',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'You are successfully running web2py': 'Vous roulez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You visited the url %s': "Vous avez visité l'URL %s",
}
| apache-2.0 | -2,101,414,199,977,031,400 | 40 | 291 | 0.688728 | false |
etkirsch/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
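# With the parameters left uncommented above, the grid has 3 * 2 * 2 * 2 = 24
# candidate settings; GridSearchCV fits each of them once per cross-validation
# fold (3 folds by default here), i.e. 72 fits in total, which is why enabling
# the commented-out options quickly becomes expensive.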
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause | 8,051,028,704,210,998,000 | 31.232558 | 79 | 0.635883 | false |
maxamillion/anaconda | pyanaconda/ui/gui/spokes/advstorage/iscsi.py | 3 | 19069 | # iSCSI configuration dialog
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <[email protected]>
#
from IPy import IP
from collections import namedtuple
from gi.repository import GLib
from pyanaconda import constants
from pyanaconda.threads import threadMgr, AnacondaThread
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.utils import escape_markup
from pyanaconda.i18n import _
from pyanaconda import nm
from pyanaconda.regexes import ISCSI_IQN_NAME_REGEX, ISCSI_EUI_NAME_REGEX
__all__ = ["ISCSIDialog"]
STYLE_NONE = 0
STYLE_CHAP = 1
STYLE_REVERSE_CHAP = 2
Credentials = namedtuple("Credentials", ["style",
"targetIP", "initiator", "username",
"password", "rUsername", "rPassword"])
NodeStoreRow = namedtuple("NodeStoreRow", ["selected", "notLoggedIn", "name", "iface", "portal"])
def discover_no_credentials(builder):
return Credentials(STYLE_NONE,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
"", "", "", "")
def discover_chap(builder):
return Credentials(STYLE_CHAP,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
builder.get_object("chapUsernameEntry").get_text(),
builder.get_object("chapPasswordEntry").get_text(),
"", "")
def discover_reverse_chap(builder):
return Credentials(STYLE_REVERSE_CHAP,
builder.get_object("targetEntry").get_text(),
builder.get_object("initiatorEntry").get_text(),
builder.get_object("rchapUsernameEntry").get_text(),
builder.get_object("rchapPasswordEntry").get_text(),
builder.get_object("rchapReverseUsername").get_text(),
builder.get_object("rchapReversePassword").get_text())
# This list maps the current page from the authNotebook to a function to grab
# credentials out of the UI. This works as long as authNotebook keeps the
# filler page at the front.
discoverMap = [discover_no_credentials, discover_chap, discover_reverse_chap]
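# For illustration: page 0 of authNotebook is the blank filler page, page 1 the
# CHAP page and page 2 the reverse-CHAP page, so
# discoverMap[self._authNotebook.get_current_page()](self.builder) picks the
# matching credential reader.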
def login_no_credentials(builder):
return Credentials(STYLE_NONE,
"", "",
"", "", "", "")
def login_chap(builder):
return Credentials(STYLE_CHAP,
"", "",
builder.get_object("loginChapUsernameEntry").get_text(),
builder.get_object("loginChapPasswordEntry").get_text(),
"", "")
def login_reverse_chap(builder):
return Credentials(STYLE_REVERSE_CHAP,
"", "",
builder.get_object("loginRchapUsernameEntry").get_text(),
builder.get_object("loginRchapPasswordEntry").get_text(),
builder.get_object("loginRchapReverseUsername").get_text(),
builder.get_object("loginRchapReversePassword").get_text())
# And this list maps the current page from the loginAuthNotebook to a function
# to grab credentials out of the UI. This works as long as loginAuthNotebook
# keeps the filler page at the front, and we check to make sure "Use the
# credentials from discovery" is not selected first.
loginMap = [login_no_credentials, login_chap, login_reverse_chap]
def credentials_valid(credentials):
if credentials.style == STYLE_NONE:
return True
elif credentials.style == STYLE_CHAP:
return credentials.username.strip() != "" and credentials.password != ""
elif credentials.style == STYLE_REVERSE_CHAP:
return credentials.username.strip() != "" and credentials.password != "" and \
credentials.rUsername.strip() != "" and credentials.rPassword != ""
class ISCSIDialog(GUIObject):
builderObjects = ["iscsiDialog", "nodeStore", "nodeStoreFiltered"]
mainWidgetName = "iscsiDialog"
uiFile = "spokes/advstorage/iscsi.glade"
def __init__(self, data, storage):
GUIObject.__init__(self, data)
self.storage = storage
self.iscsi = self.storage.iscsi()
self._discoveryError = None
self._loginError = False
self._discoveredNodes = []
self._update_devicetree = False
self._authTypeCombo = self.builder.get_object("authTypeCombo")
self._authNotebook = self.builder.get_object("authNotebook")
self._iscsiNotebook = self.builder.get_object("iscsiNotebook")
self._loginButton = self.builder.get_object("loginButton")
self._loginAuthTypeCombo = self.builder.get_object("loginAuthTypeCombo")
self._loginAuthNotebook = self.builder.get_object("loginAuthNotebook")
self._loginGrid = self.builder.get_object("loginGrid")
self._loginConditionNotebook = self.builder.get_object("loginConditionNotebook")
self._configureGrid = self.builder.get_object("configureGrid")
self._conditionNotebook = self.builder.get_object("conditionNotebook")
self._bindCheckbox = self.builder.get_object("bindCheckbutton")
self._startButton = self.builder.get_object("startButton")
self._okButton = self.builder.get_object("okButton")
self._cancelButton = self.builder.get_object("cancelButton")
self._initiatorEntry = self.builder.get_object("initiatorEntry")
self._store = self.builder.get_object("nodeStore")
self._storeFilter = self.builder.get_object("nodeStoreFiltered")
def refresh(self):
self._bindCheckbox.set_active(bool(self.iscsi.ifaces))
self._bindCheckbox.set_sensitive(self.iscsi.mode == "none")
self._authTypeCombo.set_active(0)
self._startButton.set_sensitive(True)
self._loginAuthTypeCombo.set_active(0)
self._storeFilter.set_visible_column(1)
self._initiatorEntry.set_text(self.iscsi.initiator)
self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
@property
def selectedNames(self):
return [itr[2] for itr in self._store if itr[0]]
def run(self):
rc = self.window.run()
self.window.destroy()
# We need to call this to get the device nodes to show up
# in our devicetree.
if self._update_devicetree:
self.storage.devicetree.populate()
return rc
##
## DISCOVERY
##
def on_auth_type_changed(self, widget, *args):
self._authNotebook.set_current_page(widget.get_active())
# When we change the notebook, we also need to reverify the credentials
# in order to set the Start button sensitivity.
self.on_discover_field_changed()
def _discover(self, credentials, bind):
# This needs to be in its own thread, not marked with gtk_action_* because it's
# called from on_start_clicked, which is in the GTK main loop. Those decorators
# won't do anything special in that case.
if not self.iscsi.initiatorSet:
self.iscsi.initiator = credentials.initiator
# interfaces created here affect nodes that iscsi.discover would return
if self.iscsi.mode == "none" and not bind:
self.iscsi.delete_interfaces()
elif (self.iscsi.mode == "bind"
or self.iscsi.mode == "none" and bind):
activated = set(nm.nm_activated_devices())
created = set(self.iscsi.ifaces.values())
self.iscsi.create_interfaces(activated - created)
try:
self._discoveredNodes = self.iscsi.discover(credentials.targetIP,
username=credentials.username,
password=credentials.password,
r_username=credentials.rUsername,
r_password=credentials.rPassword)
except IOError as e:
self._discoveryError = str(e)
return
if len(self._discoveredNodes) == 0:
self._discoveryError = "No nodes discovered."
def _check_discover(self, *args):
if threadMgr.get(constants.THREAD_ISCSI_DISCOVER):
return True
# When iscsi discovery is done, update the UI. We don't need to worry
# about the user escaping from the dialog because all the buttons are
# marked insensitive.
spinner = self.builder.get_object("waitSpinner")
spinner.stop()
if self._discoveryError:
# Failure. Display some error message and leave the user on the
# dialog to try again.
self.builder.get_object("discoveryErrorLabel").set_text(self._discoveryError)
self._discoveryError = None
self._conditionNotebook.set_current_page(2)
self._set_configure_sensitive(True)
else:
# Success. Now populate the node store and kick the user on over to
# that subscreen.
self._add_nodes(self._discoveredNodes)
self._iscsiNotebook.set_current_page(1)
# If some form of login credentials were used for discovery,
# default to using the same for login.
if self._authTypeCombo.get_active() != 0:
self._loginAuthTypeCombo.set_active(3)
# We always want to enable this button, in case the user's had enough.
self._cancelButton.set_sensitive(True)
return False
def _set_configure_sensitive(self, sensitivity):
for child in self._configureGrid.get_children():
if child == self._initiatorEntry:
self._initiatorEntry.set_sensitive(not self.iscsi.initiatorSet)
elif child == self._bindCheckbox:
self._bindCheckbox.set_sensitive(sensitivity and self.iscsi.mode == "none")
elif child != self._conditionNotebook:
child.set_sensitive(sensitivity)
def on_start_clicked(self, *args):
# First, update some widgets to not be usable while discovery happens.
self._startButton.hide()
self._cancelButton.set_sensitive(False)
self._okButton.set_sensitive(False)
self._conditionNotebook.set_current_page(1)
self._set_configure_sensitive(False)
self._initiatorEntry.set_sensitive(False)
# Now get the node discovery credentials.
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
discoveredLabelText = _("The following nodes were discovered using the iSCSI initiator "\
"<b>%(initiatorName)s</b> using the target IP address "\
"<b>%(targetAddress)s</b>. Please select which nodes you "\
"wish to log into:") % \
{"initiatorName": escape_markup(credentials.initiator),
"targetAddress": escape_markup(credentials.targetIP)}
discoveredLabel = self.builder.get_object("discoveredLabel")
discoveredLabel.set_markup(discoveredLabelText)
bind = self._bindCheckbox.get_active()
spinner = self.builder.get_object("waitSpinner")
spinner.start()
threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_DISCOVER, target=self._discover,
args=(credentials, bind)))
GLib.timeout_add(250, self._check_discover)
# Only when the initiator name, IP address, and any auth fields are filled
# in with valid values should the Start button be made sensitive.
def _target_ip_valid(self):
widget = self.builder.get_object("targetEntry")
text = widget.get_text()
try:
IP(text)
return True
except ValueError:
return False
def _initiator_name_valid(self):
widget = self.builder.get_object("initiatorEntry")
text = widget.get_text()
stripped = text.strip()
#iSCSI Naming Standards: RFC 3720 and RFC 3721
#iSCSI Name validation using regex. Name should either match IQN format or EUI format.
return bool(ISCSI_IQN_NAME_REGEX.match(stripped) or ISCSI_EUI_NAME_REGEX.match(stripped))
def on_discover_field_changed(self, *args):
# Make up a credentials object so we can test if it's valid.
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
sensitive = self._target_ip_valid() and self._initiator_name_valid() and credentials_valid(credentials)
self._startButton.set_sensitive(sensitive)
##
## LOGGING IN
##
def _add_nodes(self, nodes):
for node in nodes:
iface = self.iscsi.ifaces.get(node.iface, node.iface)
portal = "%s:%s" % (node.address, node.port)
self._store.append([False, True, node.name, iface, portal])
# We should select the first node by default.
self._store[0][0] = True
def on_login_type_changed(self, widget, *args):
self._loginAuthNotebook.set_current_page(widget.get_active())
# When we change the notebook, we also need to reverify the credentials
# in order to set the Log In button sensitivity.
self.on_login_field_changed()
def on_row_toggled(self, button, path):
if not path:
return
# Then, go back and mark just this row as selected.
itr = self._storeFilter.get_iter(path)
itr = self._storeFilter.convert_iter_to_child_iter(itr)
self._store[itr][0] = not self._store[itr][0]
def _login(self, credentials):
for row in self._store:
obj = NodeStoreRow(*row)
if not obj.selected:
continue
for node in self._discoveredNodes:
if obj.notLoggedIn and node.name == obj.name \
and obj.portal == "%s:%s" % (node.address, node.port):
# when binding interfaces, also match the interface
if self.iscsi.ifaces and \
obj.iface != self.iscsi.ifaces[node.iface]:
continue
(rc, msg) = self.iscsi.log_into_node(node,
username=credentials.username,
password=credentials.password,
r_username=credentials.rUsername,
r_password=credentials.rPassword)
if not rc:
self._loginError = msg
return
self._update_devicetree = True
row[1] = False
def _check_login(self, *args):
if threadMgr.get(constants.THREAD_ISCSI_LOGIN):
return True
spinner = self.builder.get_object("loginSpinner")
spinner.stop()
spinner.hide()
if self._loginError:
self.builder.get_object("loginErrorLabel").set_text(self._loginError)
self._loginError = None
self._loginConditionNotebook.set_current_page(1)
self._cancelButton.set_sensitive(True)
self._loginButton.set_sensitive(True)
else:
anyLeft = False
self._loginConditionNotebook.set_current_page(0)
# Select the now-first target for the user in case they want to
# log into another one.
for row in self._store:
if row[1]:
row[0] = True
anyLeft = True
# And make the login button sensitive if there are any more
# nodes to login to.
self._loginButton.set_sensitive(True)
break
self._okButton.set_sensitive(True)
# Once a node has been logged into, it doesn't make much sense to let
# the user cancel. Cancel what, exactly?
self._cancelButton.set_sensitive(False)
if not anyLeft:
self.window.response(1)
self._set_login_sensitive(True)
return False
def _set_login_sensitive(self, sensitivity):
for child in self._loginGrid.get_children():
if child != self._loginConditionNotebook:
child.set_sensitive(sensitivity)
def on_login_clicked(self, *args):
# Make the buttons insensitive while we work.
self._okButton.set_sensitive(False)
self._cancelButton.set_sensitive(False)
self._loginButton.set_sensitive(False)
self._loginConditionNotebook.set_current_page(0)
self._set_login_sensitive(False)
spinner = self.builder.get_object("loginSpinner")
spinner.start()
spinner.set_visible(True)
spinner.show()
# Are we reusing the credentials from the discovery step? If so, grab them
# out of the UI again here. They should still be there.
page = self._loginAuthNotebook.get_current_page()
if page == 3:
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
else:
credentials = loginMap[page](self.builder)
threadMgr.add(AnacondaThread(name=constants.THREAD_ISCSI_LOGIN, target=self._login,
args=(credentials,)))
GLib.timeout_add(250, self._check_login)
def on_login_field_changed(self, *args):
# Make up a credentials object so we can test if it's valid.
page = self._loginAuthNotebook.get_current_page()
if page == 3:
credentials = discoverMap[self._authNotebook.get_current_page()](self.builder)
else:
credentials = loginMap[page](self.builder)
self._loginButton.set_sensitive(credentials_valid(credentials))
| gpl-2.0 | -1,157,520,728,938,784,000 | 41.094923 | 111 | 0.609314 | false |
gaperez64/acacia4aiger | source/acacia_plus/library_linker.py | 1 | 16085 | # This file is part of Acacia+, a tool for synthesis of reactive systems using antichain-based techniques
# Copyright (C) 2011-2013 UMONS-ULB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from ctypes import *
import os
from constants import *
#### STRUCTURES ####
#### GList C structure
class GList(Structure):
pass
GList._fields_ = [("data", c_void_p),
("next", POINTER(GList)),
("pred", POINTER(GList))]
#### GNode C structure
class GNode(Structure):
pass
GNode._fields_ = [("data", POINTER(c_void_p)),
("next", POINTER(GNode)),
("pred", POINTER(GNode)),
("parent", POINTER(GNode)),
("children", POINTER(GNode))]
#### AlphabetInfo C structure
class AlphabetInfo(Structure):
_fields_ = [("input_size", c_int),
("output_size", c_int),
("input", POINTER(c_char_p)),
("output", POINTER(c_char_p)),
("sigma_input_size", c_int),
("sigma_output_size", c_int),
("sigma_input", POINTER(POINTER(c_ubyte))),
("sigma_output", POINTER(POINTER(c_ubyte)))]
#### Label C structure
class Label(Structure):
_fields_ = [("disjunction_size", c_int),
("disjunction", POINTER(POINTER(c_ubyte)))]
#### TBUCW_tran C structure
class TBUCW_tran(Structure):
pass
#### TBUCW_state C structure
class TBUCW_state(Structure):
_fields_ = [("state_label", c_int),
("nb_in_tran", c_int),
("nb_out_tran", c_int),
("in_tran", POINTER(POINTER(TBUCW_tran))),
("out_tran", POINTER(POINTER(TBUCW_tran))),
("is_accepting", c_byte),
("player", c_byte),
("unbounded", c_byte),
("is_complete", c_byte),
("is_trash", c_byte)]
TBUCW_tran._fields_ = [("state_from", POINTER(TBUCW_state)),
("state_to", POINTER(TBUCW_state)),
("label", POINTER(Label))]
#### TBUCW C structure
class TBUCW(Structure):
_fields_ = [("nb_states", c_int),
("initial_state_index", c_int),
("alphabet", POINTER(AlphabetInfo)),
("v_I", POINTER(POINTER(c_int))),
("v_O", POINTER(POINTER(c_int))),
("dimension", c_int),
("states", POINTER(POINTER(TBUCW_state)))]
#### Antichain C structure
class Antichain(Structure):
_fields_ = [("size", c_int),
("incomparable_elements", POINTER(GList))]
#### SafetyGame C structure
class SafetyGame(Structure):
_fields_ = [("positions_O", POINTER(Antichain)),
("positions_I", POINTER(Antichain)),
("first_to_play", c_byte)]
#### CFInfo C structure
class CFInfo(Structure):
_fields_ = [("starting_player", c_byte),
("composition_size", c_int),
("cf_range_size_sum", c_int),
("k_value", POINTER(c_int)),
("nb_lost_bits", POINTER(c_int)),
("nb_states_by_integer", POINTER(c_int)),
("cf_range_size", POINTER(c_int)),
("end_index_starting_p", POINTER(c_int)),
("start_index_other_p", POINTER(c_int)),
("first_state_other_p_index", POINTER(c_int)),
("automaton", POINTER(POINTER(TBUCW)))]
#### CountingFunction C structure
class CountingFunction(Structure):
_fields_ = [("player", c_byte),
("sum_of_counters", c_int),
("max_counter", c_int),
("mapping", POINTER(c_ubyte)),
("info", POINTER(GNode))]
#### Vector C structure
class Vector(Structure):
_fields_ = [("dimension", c_int),
("max_value", POINTER(c_int)),
("values", POINTER(c_int))]
#### Tuple C structure
class Tuple(Structure):
_fields_ = [("cf", POINTER(CountingFunction)),
("credits", POINTER(Vector))]
#### OtfurResult C structure
class OtfurResult(Structure):
_fields_ = [("winning_positions", POINTER(SafetyGame)),
("otfur_time", c_float),
("winning_positions_computation_time", c_float),
("nb_cf_passed", c_int),
("nb_iter", c_int)]
#### TSTransition C structure
class TSTransition(Structure):
_fields_ = [("from", c_int),
("to", c_int),
("label", POINTER(c_char))]
#### TSState C structure
class TSState(Structure):
_fields_ = [("player", c_byte),
("nb_tr", c_int),
("transitions", POINTER(GList))]
#### TransitionSystem C structure
class TransitionSystem(Structure):
_fields_ = [("nb_states_PO", c_int),
("nb_states_PI", c_int),
("size_states_PO", c_int),
("size_states_PI", c_int),
("nb_initial_states", c_int),
("initial_states", POINTER(c_int)),
("states", POINTER(POINTER(TSState)))]
#### FUNCTIONS LOADING ####
if os.uname()[0] == "Darwin":
lib = cdll.LoadLibrary(MAIN_DIR_PATH +
"lib/acacia_plus.dylib")
elif os.uname()[0] == "Linux":
lib = cdll.LoadLibrary(MAIN_DIR_PATH +
"lib/acacia_plus.so")
else:
print "OS not supported"
exit(0)
##TBUCW
init_tbucw_c = lib.init_tbucw
init_tbucw_c.argtypes = [c_int]
init_tbucw_c.restype = POINTER(TBUCW)
add_state_c = lib.add_state
add_state_c.argtypes = [POINTER(TBUCW), c_int, c_int, c_int, c_byte, c_byte, c_byte, c_byte]
add_state_c.restype = None
add_tran_c = lib.add_tran
add_tran_c.argtypes = [POINTER(TBUCW), c_char_p, c_int, c_int, c_int]
add_tran_c.restype = None
set_initial_state_c = lib.set_initial_state
set_initial_state_c.argtypes = [POINTER(TBUCW), c_int]
set_initial_state_c.restype = None
set_is_accepting_c = lib.set_is_accepting
set_is_accepting_c.argtypes = [POINTER(TBUCW), c_int, c_byte]
set_is_accepting_c.restype = None
set_alphabet_c = lib.set_alphabet
set_alphabet_c.argtypes = [POINTER(TBUCW), POINTER(AlphabetInfo)]
set_alphabet_c.restype = POINTER(TBUCW)
report_accepting_states_c = lib.report_accepting_states
report_accepting_states_c.argtypes = [POINTER(TBUCW)]
report_accepting_states_c.restype = None
duplicate_all_tran_c = lib.duplicate_all_tran
duplicate_all_tran_c.argtypes = [POINTER(TBUCW)]
duplicate_all_tran_c.restype = None
set_is_complete_c = lib.set_is_complete
set_is_complete_c.argtypes = [POINTER(TBUCW)]
set_is_complete_c.restype = None
is_accepting_c = lib.is_accepting
is_accepting_c.argtypes = [POINTER(TBUCW), c_int]
is_accepting_c.restype = c_byte
is_complete_c = lib.is_complete
is_complete_c.argtypes = [POINTER(TBUCW), c_int]
is_complete_c.restype = c_byte
get_player_id_c = lib.get_player_id
get_player_id_c.argtypes = [POINTER(TBUCW), c_int]
get_player_id_c.restype = c_byte
get_formula_c = lib.get_formula
get_formula_c.argtypes = [POINTER(AlphabetInfo), c_byte, c_int]
get_formula_c.restype = c_char_p
get_tbucw_size_c = lib.get_tbucw_size
get_tbucw_size_c.argtypes = [POINTER(TBUCW)]
get_tbucw_size_c.restype = c_int
init_alphabet_c = lib.init_alphabet
init_alphabet_c.argtypes = [c_int, c_int]
init_alphabet_c.restype = POINTER(AlphabetInfo)
add_input_prop_c = lib.add_input_prop
add_input_prop_c.argtypes = [POINTER(AlphabetInfo), c_char_p]
add_input_prop_c.restype = None
add_output_prop_c = lib.add_output_prop
add_output_prop_c.argtypes = [POINTER(AlphabetInfo), c_char_p]
add_output_prop_c.restype = None
compute_alphabets_c = lib.compute_alphabets
compute_alphabets_c.argtypes = [POINTER(AlphabetInfo)]
compute_alphabets_c.restype = None
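# Illustrative sketch (not part of the original bindings) of how the alphabet
# helpers above could be driven from Python; the proposition names are made up:
#
#     alphabet = init_alphabet_c(2, 1)      # 2 input props, 1 output prop
#     add_input_prop_c(alphabet, "req0")
#     add_input_prop_c(alphabet, "req1")
#     add_output_prop_c(alphabet, "grant")
#     compute_alphabets_c(alphabet)         # fills sigma_input / sigma_output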
get_succ_from_sigma_index_c = lib.get_succ_from_sigma_index
get_succ_from_sigma_index_c.argtypes = [POINTER(TBUCW), c_int, c_int]
get_succ_from_sigma_index_c.restype = POINTER(c_int)
get_all_succ_c = lib.get_all_succ
get_all_succ_c.argtypes = [POINTER(TBUCW), c_int]
get_all_succ_c.restype = POINTER(c_int)
print_tbucw_c = lib.print_tbucw
print_tbucw_c.argtypes = [POINTER(TBUCW)]
print_tbucw_c.restype = None
print_tbucw_stats_c = lib.print_tbucw_stats
print_tbucw_stats_c.argtypes = [POINTER(TBUCW)]
print_tbucw_stats_c.restype = None
print_formula_c = lib.print_formula
print_formula_c.argtypes = [POINTER(TBUCW), POINTER(c_ubyte), c_int, POINTER(c_char_p)]
print_formula_c.restype = None
free_tbucw_c = lib.free_tbucw
free_tbucw_c.argtypes = [POINTER(TBUCW)]
free_tbucw_c.restype = None
optimize_tbucw_c = lib.optimize_tbucw
optimize_tbucw_c.argtypes = [POINTER(TBUCW), POINTER(c_byte)]
optimize_tbucw_c.restype = POINTER(TBUCW)
reset_tbucw_states_labels_c = lib.reset_tbucw_states_labels
reset_tbucw_states_labels_c.argtypes = [POINTER(TBUCW)]
reset_tbucw_states_labels_c.restype = None
set_weight_function_c = lib.set_weight_function
set_weight_function_c.argtypes = [POINTER(TBUCW), c_byte, POINTER(POINTER(c_int))]
set_weight_function_c.restype = POINTER(TBUCW)
set_dimension_c = lib.set_dimension
set_dimension_c.argtypes = [POINTER(TBUCW), c_int]
set_dimension_c.restype = POINTER(TBUCW)
##GList
is_link_null_c = lib.is_link_null
is_link_null_c.argtypes = [POINTER(GList)]
is_link_null_c.restype = c_byte
get_link_data_c = lib.get_link_data
get_link_data_c.argtypes = [POINTER(GList)]
get_link_data_c.restype = POINTER(Tuple)
## CountingFunction
build_cf_info_c = lib.build_cf_info
build_cf_info_c.argtypes = [POINTER(TBUCW), c_int]
build_cf_info_c.restype = POINTER(GNode)
compose_cf_info_c = lib.compose_cf_info
compose_cf_info_c.argtypes = [POINTER(POINTER(GNode)), c_int]
compose_cf_info_c.restype = POINTER(GNode)
##Tuple
build_initial_tuple_c = lib.build_initial_tuple
build_initial_tuple_c.argtypes = [POINTER(GNode), c_int, POINTER(c_int)]
build_initial_tuple_c.restype = POINTER(Tuple)
set_not_defined_tuple_c = lib.set_not_defined_tuple
set_not_defined_tuple_c.argtypes = None
set_not_defined_tuple_c.restype = None
compare_tuples_c = lib.compare_tuples
compare_tuples_c.argtypes = [POINTER(Tuple), POINTER(Tuple)]
compare_tuples_c.restype = c_byte
tuple_succ_c = lib.tuple_succ
tuple_succ_c.argtypes = [POINTER(Tuple), c_int, POINTER(AlphabetInfo)]
tuple_succ_c.restype = POINTER(Tuple)
clone_tuple_c = lib.clone_tuple
clone_tuple_c.argtypes = [POINTER(Tuple)]
clone_tuple_c.restype = c_void_p
compose_tuples_c = lib.compose_tuples
compose_tuples_c.argtypes = [POINTER(POINTER(Tuple)), c_int, POINTER(GNode)]
compose_tuples_c.restype = c_void_p
print_tuple_c = lib.print_tuple
print_tuple_c.argtypes = [POINTER(Tuple)]
print_tuple_c.restype = None
free_tuple_full_c = lib.free_tuple_full
free_tuple_full_c.argtypes = [POINTER(Tuple)]
free_tuple_full_c.restype = None
free_not_defined_tuple_c = lib.free_not_defined_tuple
free_not_defined_tuple_c.argtypes = None
free_not_defined_tuple_c.restype = None
##Antichain
PRINT_ELEMENT_FUNC = CFUNCTYPE(None, c_void_p)
PRINT_TUPLE_FUNC = CFUNCTYPE(None, POINTER(Tuple))
COMPARE_TUPLES_FUNC = CFUNCTYPE(c_byte, POINTER(Tuple), POINTER(Tuple))
FREE_TUPLE_FULL_FUNC = CFUNCTYPE(None, POINTER(Tuple))
CLONE_TUPLE_FUNC = CFUNCTYPE(c_void_p, POINTER(Tuple))
COMPOSE_TUPLES_FUNC = CFUNCTYPE(c_void_p, POINTER(POINTER(Tuple)), c_int, POINTER(GNode))
compare_antichains_c = lib.compare_antichains
compare_antichains_c.argtypes = [POINTER(Antichain), POINTER(Antichain), COMPARE_TUPLES_FUNC]
compare_antichains_c.restype = c_byte
contains_element_c = lib.contains_element
contains_element_c.argtypes = [POINTER(Antichain), c_void_p, COMPARE_TUPLES_FUNC]
contains_element_c.restype = c_byte
compose_antichains_c = lib.compose_antichains
compose_antichains_c.argtypes = [POINTER(POINTER(Antichain)), c_int, COMPOSE_TUPLES_FUNC, POINTER(GNode)]
compose_antichains_c.restype = POINTER(Antichain)
clone_antichain_c = lib.clone_antichain
clone_antichain_c.argtypes = [POINTER(Antichain), CLONE_TUPLE_FUNC]
clone_antichain_c.restype = POINTER(Antichain)
free_antichain_full_c = lib.free_antichain_full
free_antichain_full_c.argtypes = [POINTER(Antichain), FREE_TUPLE_FULL_FUNC]
free_antichain_full_c.restype = None
print_antichain_c = lib.print_antichain
print_antichain_c.argtypes = [POINTER(Antichain), PRINT_TUPLE_FUNC]
print_antichain_c.restype = None
##BackwardAlgorithm
build_start_antichain_c = lib.build_start_antichain
build_start_antichain_c.argtypes = [c_byte, POINTER(GNode)]
build_start_antichain_c.restype = POINTER(Antichain)
pre_c = lib.pre
pre_c.argtypes = [POINTER(Antichain), POINTER(Antichain), c_byte, POINTER(AlphabetInfo)]
pre_c.restype = POINTER(Antichain)
pre_crit_c = lib.pre_crit
pre_crit_c.argtypes = [POINTER(Antichain), POINTER(Antichain), POINTER(c_int), POINTER(AlphabetInfo)]
pre_crit_c.restype = POINTER(Antichain)
compute_critical_set_c = lib.compute_critical_set
compute_critical_set_c.argtypes = [POINTER(Antichain), POINTER(AlphabetInfo)]
compute_critical_set_c.restype = POINTER(c_int)
##SafetyGame
new_safety_game_c = lib.new_safety_game
new_safety_game_c.argtypes = [POINTER(Antichain), POINTER(Antichain), c_byte]
new_safety_game_c.restype = POINTER(SafetyGame)
add_credits_to_safety_game_c = lib.add_credits_to_safety_game
add_credits_to_safety_game_c.argtypes = [POINTER(SafetyGame), c_int, POINTER(c_int)]
add_credits_to_safety_game_c.restype = POINTER(SafetyGame)
free_safety_game_c = lib.free_safety_game
free_safety_game_c.argtypes = [POINTER(SafetyGame)]
free_safety_game_c.restype = None
##Cache
initialize_cache_c = lib.initialize_cache
initialize_cache_c.argtypes = None
initialize_cache_c.restype = None
initialize_cache_critical_set_c = lib.initialize_cache_critical_set
initialize_cache_critical_set_c.argtypes = None
initialize_cache_critical_set_c.restype = None
clean_cache_c = lib.clean_cache
clean_cache_c.argtypes = None
clean_cache_c.restype = None
clean_cache_critical_set_c = lib.clean_cache_critical_set
clean_cache_critical_set_c.argtypes = None
clean_cache_critical_set_c.restype = None
##ForwardAlgorithm
otfur_c = lib.otfur
otfur_c.argtypes = [POINTER(Antichain), POINTER(Antichain), POINTER(GNode), POINTER(AlphabetInfo), c_byte, c_int, POINTER(c_int)]
otfur_c.restype = POINTER(OtfurResult)
##Synthesis
extract_strategies_from_safety_game_c = lib.extract_strategies_from_safety_game
extract_strategies_from_safety_game_c.argtypes = [POINTER(SafetyGame), POINTER(AlphabetInfo), c_byte, c_byte, c_byte]
extract_strategies_from_safety_game_c.restype = POINTER(TransitionSystem)
has_a_winning_strategy_c = lib.has_a_winning_strategy
has_a_winning_strategy_c.argtypes = [POINTER(SafetyGame), POINTER(AlphabetInfo), c_byte]
has_a_winning_strategy_c.restype = c_byte
##TransitionSystem
get_ts_state_c = lib.get_ts_state
get_ts_state_c.argtypes = [POINTER(TransitionSystem), c_int]
get_ts_state_c.restype = POINTER(TSState)
get_ts_transition_from_link_data_c = lib.get_ts_transition_from_link
get_ts_transition_from_link_data_c.argtypes = [POINTER(GList)]
get_ts_transition_from_link_data_c.restype = POINTER(TSTransition)
is_ts_state_null_c = lib.is_ts_state_null
is_ts_state_null_c.argtypes = [POINTER(TSState)]
is_ts_state_null_c.restype = c_byte
free_transition_system_c = lib.free_transition_system
free_transition_system_c.argtypes = [POINTER(TransitionSystem)]
free_transition_system_c.restype = None
#MemoryManagement
free_c = lib.free_memory
free_c.argtypes = [c_void_p]
free_c.restype = None
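# ---------------------------------------------------------------------------
# Usage note (illustrative sketch, not part of the original bindings): the
# CFUNCTYPE prototypes defined above (COMPARE_TUPLES_FUNC, CLONE_TUPLE_FUNC,
# FREE_TUPLE_FULL_FUNC, ...) exist so that Python callables can be passed to
# the C side wherever a callback argument is expected, e.g.:
#
#     compare_cb = COMPARE_TUPLES_FUNC(lambda t1, t2: compare_tuples_c(t1, t2))
#     free_cb = FREE_TUPLE_FULL_FUNC(lambda t: free_tuple_full_c(t))
#     equal = compare_antichains_c(antichain_a, antichain_b, compare_cb)
#
# (antichain_a / antichain_b are hypothetical Antichain pointers.)  The wrapper
# objects (compare_cb, free_cb) must stay referenced for as long as the C
# library may invoke them, otherwise ctypes garbage-collects the trampolines.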
| gpl-3.0 | -3,374,374,108,684,247,000 | 34.274123 | 129 | 0.673858 | false |
mbucas/python-route53 | doc_src/conf.py | 4 | 8017 | # -*- coding: utf-8 -*-
#
# python-route53 documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 4 21:03:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-route53'
copyright = u'2012, Greg Taylor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-route53doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python-route53.tex', u'python-route53 Documentation',
u'Greg Taylor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python-route53', u'python-route53 Documentation',
[u'Greg Taylor'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python-route53', u'python-route53 Documentation',
u'Greg Taylor', 'python-route53', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit | -6,281,519,880,234,041,000 | 31.589431 | 107 | 0.706249 | false |
karthiks1995/dejavu | dejavu/fingerprint.py | 15 | 5828 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
FINGERPRINT_REDUCTION = 20
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
locally sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks
    detected_peaks = local_max & ~eroded_background  # boolean difference ('-' on bool arrays is unsupported in modern numpy)
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # time, freq, amp
# get indices for frequency and time
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks):
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
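# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): fingerprinting
# a single mono channel read from a WAV file.  The file name and the 16-bit
# mono sample format are assumptions; dejavu normally decodes audio elsewhere
# before calling fingerprint().
if __name__ == "__main__":
    import wave
    wav = wave.open("example.wav", "rb")  # hypothetical input file
    raw = wav.readframes(wav.getnframes())
    samples = np.frombuffer(raw, dtype=np.int16)
    hashes = list(fingerprint(samples, Fs=wav.getframerate()))
    print("generated %d hashes" % len(hashes))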
| mit | 5,121,042,317,973,186,000 | 36.6 | 148 | 0.590254 | false |
mdkent/percona-xtrabackup | test/kewpie/percona_tests/xtrabackup_disabled/ib_slave_test.py | 42 | 7074 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import shutil
import time
import unittest
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [[],[]]
servers = []
server_manager = None
test_executor = None
# we explicitly use the --no-timestamp option
# here. We will be using a generic / vanilla backup dir
backup_path = None
class basicTest(mysqlBaseTestCase):
def setUp(self):
master_server = servers[0] # assumption that this is 'master'
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
# remove backup path
if os.path.exists(backup_path):
shutil.rmtree(backup_path)
os.mkdir(backup_path)
def test_basic1(self):
if servers[0].type not in ['mysql','percona']:
return
else:
self.servers = servers
innobackupex = test_executor.system_manager.innobackupex_path
xtrabackup = test_executor.system_manager.xtrabackup_path
master_server = servers[0] # assumption that this is 'master'
slave_server = servers[1]
backup_path = os.path.join(master_server.vardir, '_xtrabackup')
output_path = os.path.join(master_server.vardir, 'innobackupex.out')
exec_path = os.path.dirname(innobackupex)
# populate our server with a test bed
test_cmd = "./gentest.pl --gendata=conf/percona/percona.zz"
retcode, output = self.execute_randgen(test_cmd, test_executor, master_server)
#self.assertEqual(retcode, 0, msg=output)
# take a backup
cmd = [ innobackupex
,"--defaults-file=%s" %master_server.cnf_file
,"--user=root"
,"--socket=%s" %master_server.socket_file
,"--slave-info"
," --ibbackup=%s" %xtrabackup
,backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
main_backup_path = self.find_backup_path(output)
self.assertEqual(retcode, 0, msg = output)
# shutdown our slave server
slave_server.stop()
# prepare our backup
cmd = [ innobackupex
, "--apply-log"
, "--use-memory=500M"
, "--ibbackup=%s" %xtrabackup
, main_backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode, 0, msg = output)
# remove old datadir
shutil.rmtree(slave_server.datadir)
os.mkdir(slave_server.datadir)
# restore from backup
cmd = [ innobackupex
, "--defaults-file=%s" %slave_server.cnf_file
, "--copy-back"
, "--ibbackup=%s" %xtrabackup
, main_backup_path
]
cmd = " ".join(cmd)
retcode, output = self.execute_cmd(cmd, output_path, exec_path, True)
self.assertEqual(retcode,0, msg = output)
# get binlog info for slave
slave_file_name = 'xtrabackup_binlog_pos_innodb'
"""
for slave_file in ['xtrabackup_slave_info', 'xtrabackup_binlog_pos_innodb']:
slave_file_path = os.path.join(slave_server.datadir,slave_file)
with open(slave_file_path,'r') as slave_data:
print "File: %s" %slave_file
for line in slave_data:
print line, '<<<<'
# end test code
"""
slave_file_path = os.path.join(slave_server.datadir,slave_file_name)
slave_file = open(slave_file_path,'r')
binlog_file, binlog_pos = slave_file.readline().strip().split('\t')
binlog_file = os.path.basename(binlog_file)
slave_file.close()
# restart server (and ensure it doesn't crash)
slave_server.start()
self.assertEqual( slave_server.status, 1
, msg = 'Server failed restart from restored datadir...')
# update our slave's master info/ start replication
# we don't use server.set_master() method as we want
# to use binlog info produced by xtrabackup
# TODO: add these as parameters?
query = ("CHANGE MASTER TO "
"MASTER_HOST='127.0.0.1',"
"MASTER_USER='root',"
"MASTER_PASSWORD='',"
"MASTER_PORT=%d,"
"MASTER_LOG_FILE='%s',"
"MASTER_LOG_POS=%d" % ( master_server.master_port
, binlog_file
, int(binlog_pos)))
retcode, result_set = self.execute_query(query, slave_server)
self.assertEqual(retcode, 0, msg=result_set)
# TODO: check the slave status?
# /implement method to handle the check?
slave_server.slave_start()
# compare master/slave states
result = self.check_slaves_by_checksum(master_server,[slave_server])
self.assertEqual(result,None,msg=result)
# create a new table on the master
query = ("CREATE TABLE t1 "
"(col1 int NOT NULL AUTO_INCREMENT PRIMARY KEY )"
)
retcode, result_set = self.execute_query(query, master_server)
# insert some rows
query = "INSERT INTO t1 VALUES (),(),(),(),()"
retcode, result_set = self.execute_query(query, master_server)
self.assertEqual(retcode,0,msg=result_set)
# wait a bit for the slave
# TODO: proper poll routine
time.sleep(5)
for query in ["SHOW CREATE TABLE t1"
,"SELECT * FROM t1"]:
diff = self.check_slaves_by_query(master_server, [slave_server], query)
self.assertEqual(diff,None,msg=diff)
| gpl-2.0 | -2,056,828,870,813,397,000 | 39.890173 | 90 | 0.55598 | false |
Callek/build-relengapi | relengapi/blueprints/slaveloan/tasks.py | 1 | 12195 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import random
import socket
from furl import furl
import bzrest
import requests
from requests import RequestException
from flask import current_app
from functools import wraps
from redo import retry
from relengapi.blueprints.slaveloan import bugzilla
from relengapi.blueprints.slaveloan import slave_mappings
from relengapi.blueprints.slaveloan.model import History
from relengapi.blueprints.slaveloan.model import Loans
from relengapi.blueprints.slaveloan.model import Machines
from relengapi.blueprints.slaveloan.model import ManualActions
from relengapi.lib.celery import task
from relengapi.util import tz
import celery
import structlog
logger = structlog.get_logger()
def add_task_to_history(loanid, msg):
session = current_app.db.session('relengapi')
l = session.query(Loans).get(loanid)
history = History(for_loan=l,
timestamp=tz.utcnow(),
msg=msg)
session.add(history)
session.commit()
logger.debug("Log_line: %s" % msg)
def add_to_history(before=None, after=None):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
bound_task = None
loanid = kwargs.get("loanid", None)
if args and isinstance(args[0], celery.Task):
bound_task = args[0]
if before:
add_task_to_history(loanid, before.format(**locals()))
retval = f(*args, **kwargs)
if after:
add_task_to_history(loanid, after.format(**locals()))
return retval
return wrapper
return decorator
@task(bind=True)
@add_to_history(
before="Choosing an inhouse machine based on slavealloc",
after="Chose inhouse machine {retval!s}")
def choose_inhouse_machine(self, loanid, loan_class):
logger.debug("Choosing inhouse machine")
url = furl(current_app.config.get("SLAVEALLOC_URL", None))
# XXX: ToDo raise fatal if no slavealloc
url.path.add("slaves")
url.args["enabled"] = 1
try:
all_slaves = requests.get(str(url)).json()
except RequestException as exc:
logger.exception("Exception: %s" % exc)
self.retry(exc=exc)
# pylint silence
# available_slaves = filter(slave_mappings.slave_filter(loan_class), all_slaves)
available_slaves = [slave for slave in all_slaves
if slave_mappings.slave_filter(loan_class)(slave)]
chosen = random.choice(available_slaves)
logger.debug("Chosen Slave = %s" % chosen)
return chosen['name']
@task(bind=True)
@add_to_history(
before="Identifying aws machine name to use",
after="Chose aws machine {retval!s}")
def choose_aws_machine(self, loanid, loan_class):
logger.debug("Choosing aws machine name")
# We use foo-$user_shortname$N where $N is optional only if
# there exists another active loan with the foo-$user prefix
l = Loans.query.get(loanid)
prefix = slave_mappings.slavetype_to_awsprefix(loan_class)
user_shortname = l.human.ldap.split("@")[0]
bare_name = prefix + "-" + user_shortname
similar_loans = Loans.query \
.filter(Loans.machine_id == Machines.id) \
.filter(Machines.fqdn.like(bare_name + "%")) \
.filter(~Loans.status.in_(["COMPLETE"])) \
.order_by(Machines.fqdn.desc())
if similar_loans.count():
existing_aws_loan = similar_loans.first().machine.fqdn
shortname = existing_aws_loan.split(".")[0]
        suffix = shortname[len(bare_name):]
        # bump the numeric suffix; an existing bare name counts as 1
        this_name = bare_name + str((int(suffix) if suffix else 1) + 1)
else:
this_name = bare_name
logger.debug("Chosen Slave Name = %s" % this_name)
return this_name
@task(bind=True, max_retries=None)
@add_to_history(
before="Identifying FQDN and IP of {args[1]}",
after="Acquired FQDN and IP")
def fixup_machine(self, machine, loanid):
try:
fqdn = socket.getfqdn("%s.build.mozilla.org" % machine)
ipaddress = socket.gethostbyname("%s.build.mozilla.org" % machine)
session = current_app.db.session('relengapi')
m = Machines.as_unique(session,
fqdn=fqdn,
ipaddress=ipaddress)
# Re-check validity of fqdn and ip
if m.fqdn != fqdn:
m.fqdn = fqdn
if m.ipaddress != ipaddress:
m.ipaddress = ipaddress
l = session.query(Loans).get(loanid)
l.machine = m
session.commit()
except Exception as exc: # pylint: disable=W0703
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True)
@add_to_history(
before="Setup tracking bug for {args[1]}",
after="Tracking bug {retval!s} linked with loan")
def bmo_set_tracking_bug(self, machine, loanid):
try:
l = Loans.query.get(loanid)
assert l.bug_id
bug_comment = "Being loaned to %s in Bug %s" % (l.human.ldap, l.bug_id)
tracking_bug = bugzilla.ProblemTrackingBug(machine, loadInfo=False)
try:
tracking_bug.refresh()
except bzrest.errors.BugNotFound:
logger.info("Couldn't find bug, creating it...")
tracking_bug.create(comment=bug_comment, depends_on=l.bug_id)
if tracking_bug.data:
data = {
"depends_on": {
"add": [l.bug_id],
},
}
if not tracking_bug.data["is_open"]:
data["status"] = "REOPENED"
tracking_bug.add_comment(bug_comment, data=data)
if not tracking_bug.id:
raise ValueError("Unexpected result from bmo, retry")
return tracking_bug.id
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True, max_retries=None)
@add_to_history(
before="Disabling in slavealloc (via slaveapi)",
after="Disable request sent to slavealloc (via slaveapi)")
def slavealloc_disable(self, machine, loanid):
try:
url = furl(current_app.config.get("SLAVEAPI_URL", None))
url.path.add(machine).add("actions").add("disable")
loan_bug = Loans.query.get(loanid).bug_id
postdata = dict(reason="Being loaned on slaveloan bug %s" % loan_bug)
retry(requests.post, args=(str(url),), kwargs=dict(data=postdata)).json()
return machine
except Exception as exc: # pylint: disable=W0703
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True)
@add_to_history(
before="Filing the loan bug if needed",
after="Loan is tracked in bug {retval!s}")
def bmo_file_loan_bug(self, loanid, slavetype, *args, **kwargs):
try:
session = current_app.db.session('relengapi')
l = session.query(Loans).get(loanid)
if l.bug_id:
# Nothing to do, bug ID passed in
return l.bug_id
bmo_id = l.human.bugzilla
bug_id = bugzilla.create_loan_bug(loan_id=loanid,
slavetype=slavetype,
bugzilla_username=bmo_id)
if not bug_id:
raise ValueError("Unexpected result from bmo, retry")
l.bug_id = bug_id
session.commit()
return bug_id
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True)
@add_to_history(
after="Waiting for a human to perform {kwargs[action_name]} (id {retval!s})")
def register_action_needed(self, loanid, action_name):
if not action_name:
raise ValueError("must supply an action name")
try:
session = current_app.db.session('relengapi')
l = session.query(Loans).get(loanid)
if action_name == "add_to_vpn":
action_message = (
"Add user (%s) and machine (%s) to the VPN. "
"Following https://wiki.mozilla.org/ReleaseEngineering/How_To/Update_VPN_ACL"
% (l.human.ldap, l.machine.fqdn)
)
elif action_name == "create_aws_system":
action_message = (
"Create an aws machine for %s of the type requested (see loan history)."
" Following "
"https://wiki.mozilla.org/ReleaseEngineering/How_To/Loan_a_Slave#AWS_machines"
% (l.human.ldap,)
)
elif action_name == "clean_secrets":
action_message = (
"Clean secrets from the machine. See instructions at "
"https://wiki.mozilla.org/ReleaseEngineering/How_To/Loan_a_Slave#Cleaning"
)
elif action_name == "notify_complete":
action_message = (
"Notify the loanee in e-mail and the loan bug (Bug %s) that the loan is ready. "
"See template text for both in "
"https://wiki.mozilla.org/ReleaseEngineering/How_To/Loan_a_Slave#Notifying"
% l.bug_id
)
elif action_name == "gpo_switch":
action_message = (
"Need to switch host (%s) to be in the Loaner GPO group. Follow "
"https://wiki.mozilla.org/ReleaseEngineering/How_To/Loan_a_Slave"
"#t-xp32-ix.2C_t-w732-ix.2C_t-w864-ix.2C_w64-ix-slave "
"for more information"
% (l.machine.fqdn)
)
else:
raise ValueError("Invalid action name")
action = ManualActions(for_loan=l,
timestamp_start=tz.utcnow(),
msg=action_message)
session.add(action)
session.commit()
return action.id
except ValueError:
raise # Don't indefinitely retry in this case
except Exception as exc:
self.retry(exc=exc)
@task(bind=True, max_retries=None, default_retry_delay=60)
@add_to_history(
after="Noticed that a human performed pending action (id {args[1]}), continuing")
def waitfor_action(self, action_id, loanid):
try:
action = ManualActions.query.get(action_id)
if not action.timestamp_complete:
raise Exception("Retry me")
except Exception as exc:
logger.debug("Retrying...")
self.retry(exc=exc)
@task(bind=True, max_retries=None)
@add_to_history(
before="Calling slaveapi's disable method to disable from buildbot",
after="Disable request sent")
def start_disable_slave(self, machine, loanid):
try:
url = furl(current_app.config.get("SLAVEAPI_URL", None))
url.path.add(machine).add("actions").add("shutdown_buildslave")
ret = retry(requests.post, args=(str(url),), ).json()
return (ret["requestid"], machine)
except Exception as exc:
logger.exception(exc)
self.retry(exc=exc)
@task(bind=True, max_retries=None)
@add_to_history(
after="Noticed that machine was disabled (or waiting timed out)")
def waitfor_disable_slave(self, data, loanid):
requestid, machine = data
try:
url = furl(current_app.config.get("SLAVEAPI_URL", None))
url.path.add(machine).add("actions").add("shutdown_buildslave")
url.args["requestid"] = requestid
ret = retry(requests.get, args=(str(url),), kwargs=dict()).json()
if ret["state"] in (0, 1):
# 0 = PENDING, 1 = RUNNING (3=Failed and 2=Success)
raise Exception("Continue waiting for disabled slave")
except Exception as exc:
self.retry(exc=exc)
@task(bind=True, max_retries=None)
@add_to_history(
after="Marked loan as ACTIVE")
def mark_loan_status(self, loanid, status):
try:
session = current_app.db.session('relengapi')
l = session.query(Loans).get(loanid)
l.status = status
session.commit()
except Exception as exc:
self.retry(exc=exc)
@task()
def dummy_task(*args, **kwargs):
pass
bmo_file_gpo_bug = dummy_task
bmo_waitfor_bug = dummy_task
clean_secrets = dummy_task
update_loan_bug_with_details = dummy_task
email_loan_details = dummy_task
| mpl-2.0 | -3,238,224,967,578,969,600 | 35.079882 | 96 | 0.609348 | false |
kaiweifan/neutron | neutron/plugins/nec/drivers/__init__.py | 9 | 1528 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DRIVER_PATH = "neutron.plugins.nec.drivers.%s"
DRIVER_LIST = {
'trema': DRIVER_PATH % "trema.TremaPortBaseDriver",
'trema_port': DRIVER_PATH % "trema.TremaPortBaseDriver",
'trema_portmac': DRIVER_PATH % "trema.TremaPortMACBaseDriver",
'trema_mac': DRIVER_PATH % "trema.TremaMACBaseDriver",
'pfc': DRIVER_PATH % "pfc.PFCV4Driver",
'pfc_v3': DRIVER_PATH % "pfc.PFCV3Driver",
'pfc_v4': DRIVER_PATH % "pfc.PFCV4Driver",
'pfc_v5': DRIVER_PATH % "pfc.PFCV5Driver",
}
def get_driver(driver_name):
LOG.info(_("Loading OFC driver: %s"), driver_name)
driver_klass = DRIVER_LIST.get(driver_name) or driver_name
return importutils.import_class(driver_klass)
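# Example (illustrative, not part of the original module): get_driver accepts
# either a short alias from DRIVER_LIST or a fully qualified class path, since
# unknown names fall through to importutils directly.
#
#     driver_cls = get_driver('trema')                    # alias lookup
#     driver_cls = get_driver('my.module.MyCustomDriver') # hypothetical path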
| apache-2.0 | 8,547,695,502,460,841,000 | 38.179487 | 78 | 0.714005 | false |
zhjunlang/kbengine | kbe/src/lib/python/Lib/tkinter/test/runtktests.py | 71 | 2271 | """
Use this module to get and run all tk tests.
tkinter tests should live in a package inside the directory where this file
lives, like test_tkinter.
Extensions also should live in packages following the same rule as above.
"""
import os
import sys
import unittest
import importlib
import test.support
this_dir_path = os.path.abspath(os.path.dirname(__file__))
def is_package(path):
for name in os.listdir(path):
        if name in ('__init__.py', '__init__.pyc', '__init__.pyo'):
return True
return False
def get_tests_modules(basepath=this_dir_path, gui=True, packages=None):
"""This will import and yield modules whose names start with test_
and are inside packages found in the path starting at basepath.
If packages is specified it should contain package names that
want their tests collected.
"""
py_ext = '.py'
for dirpath, dirnames, filenames in os.walk(basepath):
for dirname in list(dirnames):
if dirname[0] == '.':
dirnames.remove(dirname)
if is_package(dirpath) and filenames:
pkg_name = dirpath[len(basepath) + len(os.sep):].replace('/', '.')
if packages and pkg_name not in packages:
continue
filenames = filter(
lambda x: x.startswith('test_') and x.endswith(py_ext),
filenames)
for name in filenames:
try:
yield importlib.import_module(
".%s.%s" % (pkg_name, name[:-len(py_ext)]),
"tkinter.test")
except test.support.ResourceDenied:
if gui:
raise
def get_tests(text=True, gui=True, packages=None):
"""Yield all the tests in the modules found by get_tests_modules.
    If gui is False, only tests that do not require a GUI will be
    returned."""
attrs = []
if text:
attrs.append('tests_nogui')
if gui:
attrs.append('tests_gui')
for module in get_tests_modules(gui=gui, packages=packages):
for attr in attrs:
for test in getattr(module, attr, ()):
yield test
if __name__ == "__main__":
test.support.run_unittest(*get_tests())
| lgpl-3.0 | -1,675,436,790,387,805,000 | 30.985915 | 78 | 0.590048 | false |
lepisma/gaze | src/helper.py | 1 | 3299 | """
Helper functions
"""
import numpy as np
def xgrad(gray_image):
"""
Returns the X gradient of grayscale image,
imitating MatLab's gradient function
Parameters
----------
gray_image : numpy.ndarray
Grayscale image
Returns
-------
numpy.ndarray
X gradient of image
"""
gray = np.array(gray_image, dtype = np.float32)
grad = np.column_stack(((gray[:, 1] - gray[:, 0]), \
(gray[:, 2 :] - gray[:, 0 : -2]) / 2, \
(gray[:, -1] - gray[:, -2])))
return grad
def ygrad(gray_image):
"""
Returns the Y gradient of grayscale image,
imitating MatLab's gradient function
Parameters
----------
gray_image : numpy.ndarray
Grayscale image
Returns
-------
numpy.ndarray
Y gradient of image
"""
grad = xgrad(gray_image.T).T
return grad
def test_possible_centers(pos_x, pos_y, weight, grad_x, grad_y, out_image):
"""
Calculates the dot product between
- Vector from all possible centers to gradient origin
- Gradient vector at the given point of gradient origin
Parameters
----------
pos_x, pos_y : int
Position of gradient origin
weight : float
Weight of gradient
grad_x, grad_y : int
Value of gradients at pos
out_image : numpy.ndarray
Accumulator matrix (of same size as image) to keep track of
cumulative sum of dot products
"""
rows, columns = out_image.shape
x_accu = np.tile(np.linspace(1, columns - 1, columns), [rows, 1])
y_accu = np.tile(np.linspace(1, rows - 1, rows), [columns, 1]).T
x_accu = pos_x - x_accu
y_accu = pos_y - y_accu
mag = np.sqrt((x_accu ** 2) + (y_accu ** 2))
# Normalize
x_accu /= mag
y_accu /= mag
x_accu[np.isnan(x_accu)] = 0
y_accu[np.isnan(y_accu)] = 0
# Dot product
prod = (x_accu * grad_x) + (y_accu * grad_y)
prod[prod < 0] = 0
out_image += prod * prod * weight
return
def find_center(grad_x, grad_y, out_image):
"""
Finds the center of eye from given grayscale image's gradients
Parameters
----------
grad_x : numpy.ndarray
Array of x gradients
grad_y : numpy.ndarray
Array of y gradients
Returns
-------
(x, y) : tuple
The pixel index of eye's center, relative to grad images
"""
rows, columns = grad_x.shape
#pos_list = coords(np.arange(rows), np.arange(columns))
x_pos = np.repeat(np.arange(rows), columns)
y_pos = np.tile(np.arange(columns), rows)
x_grad = grad_x.ravel(order = 'F')
y_grad = grad_y.ravel(order = 'F')
v_possible_centers = np.vectorize(test_possible_centers, excluded = ["out_image"])
v_possible_centers(x_pos, y_pos, 1.0, x_grad, y_grad, out_image = out_image)
return np.unravel_index(out_image.argmax(), out_image.shape)
#out_image /= np.max(out_image)
#out_image *= 255
#return out_image
def coords(*arrays):
"""
Returns cartesian coordinate combinations from given arrays
"""
grid = np.meshgrid(*arrays)
coord_list = [entry.ravel() for entry in grid]
points = np.vstack(coord_list).T
return points.tolist()
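# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): locating the
# eye centre in a grayscale eye patch with the helpers above.  Loading and
# cropping the patch is assumed to happen elsewhere.
def locate_center_example(gray_eye_patch):
    """
    Return (row, col) of the strongest centre candidate in the patch
    """
    grad_x = xgrad(gray_eye_patch)
    grad_y = ygrad(gray_eye_patch)
    accumulator = np.zeros(gray_eye_patch.shape, dtype = np.float32)
    return find_center(grad_x, grad_y, accumulator)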
| mit | -5,288,348,261,107,824,000 | 22.733813 | 86 | 0.574113 | false |
vinhlh/bite-project | deps/gdata-python-client/src/gdata/exif/__init__.py | 253 | 6981 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.exif, implementing the exif namespace in gdata
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module maps elements from the {EXIF} namespace[1] to GData objects.
These elements describe image data, using exif attributes[2].
Picasa Web Albums uses the exif namespace to represent Exif data encoded
in a photo [3].
Picasa Web Albums uses the following exif elements:
exif:distance
exif:exposure
exif:flash
exif:focallength
exif:fstop
exif:imageUniqueID
exif:iso
exif:make
exif:model
exif:tags
exif:time
[1]: http://schemas.google.com/photos/exif/2007.
[2]: http://en.wikipedia.org/wiki/Exif
[3]: http://code.google.com/apis/picasaweb/reference.html#exif_reference
"""
__author__ = u'[email protected]'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
EXIF_NAMESPACE = 'http://schemas.google.com/photos/exif/2007'
class ExifBaseElement(atom.AtomBase):
"""Base class for elements in the EXIF_NAMESPACE (%s). To add new elements, you only need to add the element tag name to self._tag
""" % EXIF_NAMESPACE
_tag = ''
_namespace = EXIF_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, name=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Distance(ExifBaseElement):
"(float) The distance to the subject, e.g. 0.0"
_tag = 'distance'
def DistanceFromString(xml_string):
return atom.CreateClassFromXMLString(Distance, xml_string)
class Exposure(ExifBaseElement):
"(float) The exposure time used, e.g. 0.025 or 8.0E4"
_tag = 'exposure'
def ExposureFromString(xml_string):
return atom.CreateClassFromXMLString(Exposure, xml_string)
class Flash(ExifBaseElement):
"""(string) Boolean value indicating whether the flash was used.
The .text attribute will either be `true' or `false'
As a convenience, this object's .bool method will return what you want,
so you can say:
flash_used = bool(Flash)
"""
_tag = 'flash'
def __bool__(self):
if self.text.lower() in ('true','false'):
return self.text.lower() == 'true'
def FlashFromString(xml_string):
return atom.CreateClassFromXMLString(Flash, xml_string)
class Focallength(ExifBaseElement):
"(float) The focal length used, e.g. 23.7"
_tag = 'focallength'
def FocallengthFromString(xml_string):
return atom.CreateClassFromXMLString(Focallength, xml_string)
class Fstop(ExifBaseElement):
"(float) The fstop value used, e.g. 5.0"
_tag = 'fstop'
def FstopFromString(xml_string):
return atom.CreateClassFromXMLString(Fstop, xml_string)
class ImageUniqueID(ExifBaseElement):
"(string) The unique image ID for the photo. Generated by Google Photo servers"
_tag = 'imageUniqueID'
def ImageUniqueIDFromString(xml_string):
return atom.CreateClassFromXMLString(ImageUniqueID, xml_string)
class Iso(ExifBaseElement):
"(int) The iso equivalent value used, e.g. 200"
_tag = 'iso'
def IsoFromString(xml_string):
return atom.CreateClassFromXMLString(Iso, xml_string)
class Make(ExifBaseElement):
"(string) The make of the camera used, e.g. Fictitious Camera Company"
_tag = 'make'
def MakeFromString(xml_string):
return atom.CreateClassFromXMLString(Make, xml_string)
class Model(ExifBaseElement):
"(string) The model of the camera used,e.g AMAZING-100D"
_tag = 'model'
def ModelFromString(xml_string):
return atom.CreateClassFromXMLString(Model, xml_string)
class Time(ExifBaseElement):
"""(int) The date/time the photo was taken, e.g. 1180294337000.
Represented as the number of milliseconds since January 1st, 1970.
The value of this element will always be identical to the value
of the <gphoto:timestamp>.
Look at this object's .isoformat() for a human friendly datetime string:
photo_epoch = Time.text # 1180294337000
photo_isostring = Time.isoformat() # '2007-05-27T19:32:17.000Z'
Alternatively:
photo_datetime = Time.datetime() # (requires python >= 2.3)
"""
_tag = 'time'
def isoformat(self):
"""(string) Return the timestamp as a ISO 8601 formatted string,
e.g. '2007-05-27T19:32:17.000Z'
"""
import time
epoch = float(self.text)/1000
return time.strftime('%Y-%m-%dT%H:%M:%S.000Z', time.gmtime(epoch))
def datetime(self):
"""(datetime.datetime) Return the timestamp as a datetime.datetime object
Requires python 2.3
"""
import datetime
epoch = float(self.text)/1000
return datetime.datetime.fromtimestamp(epoch)
def TimeFromString(xml_string):
return atom.CreateClassFromXMLString(Time, xml_string)
class Tags(ExifBaseElement):
"""The container for all exif elements.
The <exif:tags> element can appear as a child of a photo entry.
"""
_tag = 'tags'
_children = atom.AtomBase._children.copy()
_children['{%s}fstop' % EXIF_NAMESPACE] = ('fstop', Fstop)
_children['{%s}make' % EXIF_NAMESPACE] = ('make', Make)
_children['{%s}model' % EXIF_NAMESPACE] = ('model', Model)
_children['{%s}distance' % EXIF_NAMESPACE] = ('distance', Distance)
_children['{%s}exposure' % EXIF_NAMESPACE] = ('exposure', Exposure)
_children['{%s}flash' % EXIF_NAMESPACE] = ('flash', Flash)
_children['{%s}focallength' % EXIF_NAMESPACE] = ('focallength', Focallength)
_children['{%s}iso' % EXIF_NAMESPACE] = ('iso', Iso)
_children['{%s}time' % EXIF_NAMESPACE] = ('time', Time)
_children['{%s}imageUniqueID' % EXIF_NAMESPACE] = ('imageUniqueID', ImageUniqueID)
def __init__(self, extension_elements=None, extension_attributes=None, text=None):
ExifBaseElement.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.fstop=None
self.make=None
self.model=None
self.distance=None
self.exposure=None
self.flash=None
self.focallength=None
self.iso=None
self.time=None
self.imageUniqueID=None
def TagsFromString(xml_string):
return atom.CreateClassFromXMLString(Tags, xml_string)
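# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): parsing an
# <exif:tags> block like the one embedded in a Picasa Web Albums photo entry.
# The XML literal below is a trimmed, hypothetical example.
EXAMPLE_TAGS_XML = (
    '<exif:tags xmlns:exif="%s">'
    '<exif:make>Fictitious Camera Company</exif:make>'
    '<exif:iso>200</exif:iso>'
    '<exif:time>1180294337000</exif:time>'
    '</exif:tags>' % EXIF_NAMESPACE)
def _example_parse_tags():
  "Return a few fields parsed from EXAMPLE_TAGS_XML (sketch only)"
  tags = TagsFromString(EXAMPLE_TAGS_XML)
  return {'make': tags.make.text,
          'iso': int(tags.iso.text),
          'taken': tags.time.isoformat()}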
| apache-2.0 | 7,752,702,114,392,744,000 | 31.16129 | 132 | 0.706835 | false |
SteveXiSong/UW-Madison-ECE757-S15-MulticastSnooping | src/cpu/kvm/X86KvmCPU.py | 54 | 2012 | # Copyright (c) 2013 Andreas Sandberg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.params import *
from BaseKvmCPU import BaseKvmCPU
class X86KvmCPU(BaseKvmCPU):
type = 'X86KvmCPU'
cxx_header = "cpu/kvm/x86_cpu.hh"
@classmethod
def export_methods(cls, code):
code('''
void dumpFpuRegs();
void dumpIntRegs();
void dumpSpecRegs();
void dumpXCRs();
void dumpXSave();
void dumpVCpuEvents();
''')
useXSave = Param.Bool(True, "Use XSave to synchronize FPU/SIMD registers")
| bsd-3-clause | 3,008,131,704,492,663,300 | 41.808511 | 78 | 0.759443 | false |
liberorbis/libernext | apps/frappe/frappe/widgets/form/meta.py | 25 | 5903 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# metadata
from __future__ import unicode_literals
import frappe, os
from frappe.model.meta import Meta
from frappe.modules import scrub, get_module_path, load_doctype_module
from frappe.model.workflow import get_workflow_name
from frappe.utils import get_html_format
from frappe.translate import make_dict_from_messages, extract_messages_from_code
from frappe.utils.jinja import render_include
######
def get_meta(doctype, cached=True):
if cached:
meta = frappe.cache().get_value("form_meta:" + doctype, lambda: FormMeta(doctype))
else:
meta = FormMeta(doctype)
if frappe.local.lang != 'en':
meta.set_translations(frappe.local.lang)
return meta
class FormMeta(Meta):
def __init__(self, doctype):
super(FormMeta, self).__init__(doctype)
self.load_assets()
def load_assets(self):
self.add_search_fields()
if not self.istable:
self.add_linked_with()
self.add_code()
self.load_print_formats()
self.load_workflows()
self.load_templates()
def as_dict(self, no_nulls=False):
d = super(FormMeta, self).as_dict(no_nulls=no_nulls)
for k in ("__js", "__css", "__list_js", "__calendar_js", "__map_js",
"__linked_with", "__messages", "__print_formats", "__workflow_docs",
"__form_grid_templates", "__listview_template"):
d[k] = self.get(k)
for i, df in enumerate(d.get("fields")):
for k in ("link_doctype", "search_fields"):
df[k] = self.get("fields")[i].get(k)
return d
def add_code(self):
path = os.path.join(get_module_path(self.module), 'doctype', scrub(self.name))
def _get_path(fname):
return os.path.join(path, scrub(fname))
self._add_code(_get_path(self.name + '.js'), '__js')
self._add_code(_get_path(self.name + '.css'), "__css")
self._add_code(_get_path(self.name + '_list.js'), '__list_js')
self._add_code(_get_path(self.name + '_calendar.js'), '__calendar_js')
listview_template = _get_path(self.name + '_list.html')
if os.path.exists(listview_template):
self.set("__listview_template", get_html_format(listview_template))
self.add_code_via_hook("doctype_js", "__js")
self.add_custom_script()
def _add_code(self, path, fieldname):
js = frappe.read_file(path)
if js:
self.set(fieldname, (self.get(fieldname) or "") + "\n\n" + render_include(js))
def add_code_via_hook(self, hook, fieldname):
for app_name in frappe.get_installed_apps():
code_hook = frappe.get_hooks(hook, default={}, app_name=app_name)
if not code_hook:
continue
files = code_hook.get(self.name, [])
if not isinstance(files, list):
files = [files]
for file in files:
path = frappe.get_app_path(app_name, *file.strip("/").split("/"))
self._add_code(path, fieldname)
def add_custom_script(self):
"""embed all require files"""
# custom script
custom = frappe.db.get_value("Custom Script", {"dt": self.name,
"script_type": "Client"}, "script") or ""
self.set("__js", (self.get('__js') or '') + "\n\n" + custom)
def add_search_fields(self):
"""add search fields found in the doctypes indicated by link fields' options"""
for df in self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]}):
if df.options:
search_fields = frappe.get_meta(df.options).search_fields
if search_fields:
df.search_fields = map(lambda sf: sf.strip(), search_fields.split(","))
def add_linked_with(self):
"""add list of doctypes this doctype is 'linked' with"""
links = frappe.db.sql("""select parent, fieldname from tabDocField
where (fieldtype="Link" and options=%s)
or (fieldtype="Select" and options=%s)""", (self.name, "link:"+ self.name))
links += frappe.db.sql("""select dt as parent, fieldname from `tabCustom Field`
where (fieldtype="Link" and options=%s)
or (fieldtype="Select" and options=%s)""", (self.name, "link:"+ self.name))
links = dict(links)
if not links:
return {}
ret = {}
for dt in links:
ret[dt] = { "fieldname": links[dt] }
for grand_parent, options in frappe.db.sql("""select parent, options from tabDocField
where fieldtype="Table"
and options in (select name from tabDocType
where istable=1 and name in (%s))""" % ", ".join(["%s"] * len(links)) ,tuple(links)):
ret[grand_parent] = {"child_doctype": options, "fieldname": links[options] }
if options in ret:
del ret[options]
self.set("__linked_with", ret)
def load_print_formats(self):
print_formats = frappe.db.sql("""select * FROM `tabPrint Format`
WHERE doc_type=%s AND docstatus<2 and ifnull(disabled, 0)=0""", (self.name,), as_dict=1,
update={"doctype":"Print Format"})
self.set("__print_formats", print_formats)
def load_workflows(self):
# get active workflow
workflow_name = get_workflow_name(self.name)
workflow_docs = []
if workflow_name and frappe.db.exists("Workflow", workflow_name):
workflow = frappe.get_doc("Workflow", workflow_name)
workflow_docs.append(workflow)
for d in workflow.get("workflow_document_states"):
workflow_docs.append(frappe.get_doc("Workflow State", d.state))
self.set("__workflow_docs", workflow_docs)
def load_templates(self):
module = load_doctype_module(self.name)
app = module.__name__.split(".")[0]
templates = {}
if hasattr(module, "form_grid_templates"):
for key, path in module.form_grid_templates.iteritems():
templates[key] = get_html_format(frappe.get_app_path(app, path))
self.set("__form_grid_templates", templates)
def set_translations(self, lang):
self.set("__messages", frappe.get_lang_dict("doctype", self.name))
# set translations for grid templates
if self.get("__form_grid_templates"):
for content in self.get("__form_grid_templates").values():
messages = extract_messages_from_code(content)
messages = make_dict_from_messages(messages)
self.get("__messages").update(messages)
| gpl-2.0 | 8,176,465,747,321,302,000 | 31.977654 | 91 | 0.667457 | false |
ashemedai/ansible | lib/ansible/modules/packaging/os/pkg5.py | 42 | 5050 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014 Peter Oliver <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:
- The naming of IPS packages is explained at U(http://www.oracle.com/technetwork/articles/servers-storage-admin/ips-package-versioning-2232906.html).
options:
name:
description:
- An FRMI of the package(s) to be installed/removed/updated.
- Multiple packages may be specified, separated by C(,).
required: true
state:
description:
- Whether to install (I(present), I(latest)), or remove (I(absent)) a
package.
required: false
default: present
choices: [ present, latest, absent ]
accept_licenses:
description:
- Accept any licences.
required: false
default: false
choices: [ true, false ]
aliases: [ accept_licences, accept ]
'''
EXAMPLES = '''
# Install Vim:
- pkg5:
name: editor/vim
# Remove finger daemon:
- pkg5:
name: service/network/finger
state: absent
# Install several packages at once:
- pkg5:
name:
- /file/gnu-findutils
- /text/gnu-grep
'''
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True, type='list'),
state=dict(
default='present',
choices=[
'present',
'installed',
'latest',
'absent',
'uninstalled',
'removed',
]
),
accept_licenses=dict(
type='bool',
default=False,
aliases=['accept_licences', 'accept'],
),
)
)
params = module.params
packages = []
# pkg(5) FRMIs include a comma before the release number, but
# AnsibleModule will have split this into multiple items for us.
# Try to spot where this has happened and fix it.
for fragment in params['name']:
if (
re.search('^\d+(?:\.\d+)*', fragment)
and packages and re.search('@[^,]*$', packages[-1])
):
packages[-1] += ',' + fragment
else:
packages.append(fragment)
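    # Example (illustrative): an FRMI such as
    #   editor/[email protected],5.11-0.175.2.0.0.42.1
    # arrives from AnsibleModule split at the comma,
    #   ['editor/[email protected]', '5.11-0.175.2.0.0.42.1'],
    # and the loop above re-joins it into the single original FRMI.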
if params['state'] in ['present', 'installed']:
ensure(module, 'present', packages, params)
elif params['state'] in ['latest']:
ensure(module, 'latest', packages, params)
elif params['state'] in ['absent', 'uninstalled', 'removed']:
ensure(module, 'absent', packages, params)
def ensure(module, state, packages, params):
response = {
'results': [],
'msg': '',
}
behaviour = {
'present': {
'filter': lambda p: not is_installed(module, p),
'subcommand': 'install',
},
'latest': {
'filter': lambda p: not is_latest(module, p),
'subcommand': 'install',
},
'absent': {
'filter': lambda p: is_installed(module, p),
'subcommand': 'uninstall',
},
}
if params['accept_licenses']:
accept_licenses = ['--accept']
else:
accept_licenses = []
to_modify = filter(behaviour[state]['filter'], packages)
if to_modify:
rc, out, err = module.run_command(
[
'pkg', behaviour[state]['subcommand']
]
+ accept_licenses
+ [
'-q', '--'
] + to_modify
)
response['rc'] = rc
response['results'].append(out)
response['msg'] += err
response['changed'] = True
if rc != 0:
module.fail_json(**response)
module.exit_json(**response)
def is_installed(module, package):
rc, out, err = module.run_command(['pkg', 'list', '--', package])
return not bool(int(rc))
def is_latest(module, package):
rc, out, err = module.run_command(['pkg', 'list', '-u', '--', package])
return bool(int(rc))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 6,713,490,162,843,288,000 | 27.370787 | 151 | 0.566535 | false |
douglaswei/stock | fxcm/prepare/parse.py | 1 | 2352 | # coding=utf-8
import struct
import time
import os, sys
def parse_hst_csv(src_path, des_path, time_from=None, time_to=None):
content = open(src_path, 'rb').read()
    # Read the header-structure information
    # base version
    print "basic version: %i" % struct.unpack("i", content[0:4])[0]
    # version information
    print "version: %s" % "".join(struct.unpack("64c", content[4:68]))
    # currency pair name
    print "detail: %s" % "".join(struct.unpack("12c", content[68:80]))
    # period (in minutes)
    print "period: %i min" % struct.unpack("i", content[80:84])[0]
    # number of decimal digits
    # print struct.unpack("i", content[84:88])
    # base time signal
    # print struct.unpack("i", content[88:92])
    # last synchronization time
    # print struct.unpack("i", content[92:96])
    # reserved for future use
    # print struct.unpack("13i", content[96:148])
    # loop over the bar records
content_len = len(content)
time_f = None if time_from is None else time.strptime(time_from, "%Y-%m-%d %H:%M")
time_t = None if time_to is None else time.strptime(time_to, "%Y-%m-%d %H:%M")
with open(des_path, "w") as des_file:
des_file.write("time,open,high,low,close,vol\n")
for tip in range(148, content_len, 60):
time_d = time.gmtime(struct.unpack("i", content[tip:tip + 4])[0])
# time_raw = time.strptime(time_d, "%a %b %d %H:%M:%S %Y")
if time_f is not None and time_f >= time_d:
continue
if time_to is not None and time_t < time_d:
continue
beg = struct.unpack("d", content[tip + 8:tip + 16])[0]
high = struct.unpack("d", content[tip + 16:tip + 24])[0]
low = struct.unpack("d", content[tip + 24:tip + 32])[0]
close = struct.unpack("d", content[tip + 32:tip + 40])[0]
vol = struct.unpack("i", content[tip + 40:tip + 44])[0]
des_file.write("%s,%f,%f,%f,%f,%d\n" % (time.strftime("%Y-%m-%d %H:%M:%S", time_d), beg, high, low, close, vol))
# print time.strftime("%Y-%m-%d-%H:%M", time_r), beg, high, low, close, vol
def process_data_dir(src_dir, des_dir, time_from=None, time_to=None):
for filename in os.listdir(src_dir):
if os.path.isfile(os.path.join(src_dir, filename)) and filename.endswith(".hst"):
src_file_path = os.path.join(src_dir, filename)
des_file_path = os.path.join(des_dir, filename.replace('.hst', '.csv'))
parse_hst_csv(src_file_path, des_file_path, time_from, time_to)
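# Minimal usage sketch (directory names and dates are invented for this
# example; they are not part of the original script): convert every .hst
# history file under ./hst into CSV files under ./csv, keeping only the bars
# between the two timestamps.
if __name__ == '__main__':
    process_data_dir('./hst', './csv',
                     time_from='2015-01-01 00:00',
                     time_to='2015-12-31 23:59')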
| gpl-2.0 | -3,818,980,083,436,185,600 | 30.746479 | 118 | 0.602484 | false |
elit3ge/SickRage | lib/github/AuthenticatedUser.py | 70 | 46818 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.Gist
import github.Repository
import github.NamedUser
import github.Plan
import github.Organization
import github.UserKey
import github.Issue
import github.Event
import github.Authorization
import github.Notification
class AuthenticatedUser(github.GithubObject.CompletableGithubObject):
"""
This class represents AuthenticatedUsers as returned for example by http://developer.github.com/v3/todo
"""
@property
def avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def bio(self):
"""
:type: string
"""
self._completeIfNotSet(self._bio)
return self._bio.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def followers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._followers_url)
return self._followers_url.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def following_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._following_url)
return self._following_url.value
@property
def gists_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._gists_url)
return self._gists_url.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def hireable(self):
"""
:type: bool
"""
self._completeIfNotSet(self._hireable)
return self._hireable.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def organizations_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._organizations_url)
return self._organizations_url.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def received_events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._received_events_url)
return self._received_events_url.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def site_admin(self):
"""
:type: bool
"""
self._completeIfNotSet(self._site_admin)
return self._site_admin.value
@property
def starred_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._starred_url)
return self._starred_url.value
@property
def subscriptions_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscriptions_url)
return self._subscriptions_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_emails(self, *emails):
"""
:calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, (str, unicode)) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/emails",
input=post_parameters
)
def add_to_following(self, following):
"""
:calls: `PUT /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/following/" + following._identity
)
def add_to_starred(self, starred):
"""
:calls: `PUT /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(starred, github.Repository.Repository), starred
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/starred/" + starred._identity
)
def add_to_subscriptions(self, subscription):
"""
:calls: `PUT /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(subscription, github.Repository.Repository), subscription
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/subscriptions/" + subscription._identity
)
def add_to_watched(self, watched):
"""
:calls: `PUT /user/watched/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/watched/" + watched._identity
)
def create_authorization(self, scopes=github.GithubObject.NotSet, note=github.GithubObject.NotSet, note_url=github.GithubObject.NotSet, client_id=github.GithubObject.NotSet, client_secret=github.GithubObject.NotSet, onetime_password=None):
"""
:calls: `POST /authorizations <http://developer.github.com/v3/oauth>`_
:param scopes: list of string
:param note: string
:param note_url: string
:param client_id: string
:param client_secret: string
:param onetime_password: string
:rtype: :class:`github.Authorization.Authorization`
"""
assert scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in scopes), scopes
assert note is github.GithubObject.NotSet or isinstance(note, (str, unicode)), note
assert note_url is github.GithubObject.NotSet or isinstance(note_url, (str, unicode)), note_url
assert client_id is github.GithubObject.NotSet or isinstance(client_id, (str, unicode)), client_id
assert client_secret is github.GithubObject.NotSet or isinstance(client_secret, (str, unicode)), client_secret
assert onetime_password is None or isinstance(onetime_password, (str, unicode)), onetime_password
post_parameters = dict()
if scopes is not github.GithubObject.NotSet:
post_parameters["scopes"] = scopes
if note is not github.GithubObject.NotSet:
post_parameters["note"] = note
if note_url is not github.GithubObject.NotSet:
post_parameters["note_url"] = note_url
if client_id is not github.GithubObject.NotSet:
post_parameters["client_id"] = client_id
if client_secret is not github.GithubObject.NotSet:
post_parameters["client_secret"] = client_secret
if onetime_password is not None:
request_header = {'X-GitHub-OTP': onetime_password} # pragma no cover (Should be covered)
else:
request_header = None
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/authorizations",
input=post_parameters,
headers=request_header,
)
return github.Authorization.Authorization(self._requester, headers, data, completed=True)
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks"
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_gist(self, public, files, description=github.GithubObject.NotSet):
"""
:calls: `POST /gists <http://developer.github.com/v3/gists>`_
:param public: bool
:param files: dict of string to :class:`github.InputFileContent.InputFileContent`
:param description: string
:rtype: :class:`github.Gist.Gist`
"""
assert isinstance(public, bool), public
assert all(isinstance(element, github.InputFileContent) for element in files.itervalues()), files
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
post_parameters = {
"public": public,
"files": dict((key, value._identity) for key, value in files.iteritems()),
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/gists",
input=post_parameters
)
return github.Gist.Gist(self._requester, headers, data, completed=True)
def create_key(self, title, key):
"""
:calls: `POST /user/keys <http://developer.github.com/v3/users/keys>`_
:param title: string
:param key: string
:rtype: :class:`github.UserKey.UserKey`
"""
assert isinstance(title, (str, unicode)), title
assert isinstance(key, (str, unicode)), key
post_parameters = {
"title": title,
"key": key,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/keys",
input=post_parameters
)
return github.UserKey.UserKey(self._requester, headers, data, completed=True)
def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, gitignore_template=github.GithubObject.NotSet):
"""
:calls: `POST /user/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param auto_init: bool
:param gitignore_template: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def edit(self, name=github.GithubObject.NotSet, email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, location=github.GithubObject.NotSet, hireable=github.GithubObject.NotSet, bio=github.GithubObject.NotSet):
"""
:calls: `PATCH /user <http://developer.github.com/v3/users>`_
:param name: string
:param email: string
:param blog: string
:param company: string
:param location: string
:param hireable: bool
:param bio: string
:rtype: None
"""
assert name is github.GithubObject.NotSet or isinstance(name, (str, unicode)), name
assert email is github.GithubObject.NotSet or isinstance(email, (str, unicode)), email
assert blog is github.GithubObject.NotSet or isinstance(blog, (str, unicode)), blog
assert company is github.GithubObject.NotSet or isinstance(company, (str, unicode)), company
assert location is github.GithubObject.NotSet or isinstance(location, (str, unicode)), location
assert hireable is github.GithubObject.NotSet or isinstance(hireable, bool), hireable
assert bio is github.GithubObject.NotSet or isinstance(bio, (str, unicode)), bio
post_parameters = dict()
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if hireable is not github.GithubObject.NotSet:
post_parameters["hireable"] = hireable
if bio is not github.GithubObject.NotSet:
post_parameters["bio"] = bio
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
"/user",
input=post_parameters
)
self._useAttributes(data)
def get_authorization(self, id):
"""
:calls: `GET /authorizations/:id <http://developer.github.com/v3/oauth>`_
:param id: integer
:rtype: :class:`github.Authorization.Authorization`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/authorizations/" + str(id)
)
return github.Authorization.Authorization(self._requester, headers, data, completed=True)
def get_authorizations(self):
"""
:calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
"""
return github.PaginatedList.PaginatedList(
github.Authorization.Authorization,
self._requester,
"/authorizations",
None
)
def get_emails(self):
"""
:calls: `GET /user/emails <http://developer.github.com/v3/users/emails>`_
:rtype: list of string
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/user/emails"
)
return data
def get_events(self):
"""
:calls: `GET /events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/events",
None
)
def get_followers(self):
"""
:calls: `GET /user/followers <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
"/user/followers",
None
)
def get_following(self):
"""
:calls: `GET /user/following <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
"/user/following",
None
)
def get_gists(self):
"""
:calls: `GET /gists <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
"/gists",
None
)
def get_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /issues <http://developer.github.com/v3/issues>`_
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, (str, unicode)), filter
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
"/issues",
url_parameters
)
def get_user_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /user/issues <http://developer.github.com/v3/issues>`_
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, (str, unicode)), filter
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
"/issues",
url_parameters
)
def get_key(self, id):
"""
:calls: `GET /user/keys/:id <http://developer.github.com/v3/users/keys>`_
:param id: integer
:rtype: :class:`github.UserKey.UserKey`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/user/keys/" + str(id)
)
return github.UserKey.UserKey(self._requester, headers, data, completed=True)
def get_keys(self):
"""
:calls: `GET /user/keys <http://developer.github.com/v3/users/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.UserKey.UserKey`
"""
return github.PaginatedList.PaginatedList(
github.UserKey.UserKey,
self._requester,
"/user/keys",
None
)
def get_notification(self, id):
"""
:calls: `GET /notifications/threads/:id <http://developer.github.com/v3/activity/notifications>`_
:rtype: :class:`github.Notification.Notification`
"""
assert isinstance(id, (str, unicode)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/notifications/threads/" + id
)
return github.Notification.Notification(self._requester, headers, data, completed=True)
def get_notifications(self, all=github.GithubObject.NotSet, participating=github.GithubObject.NotSet):
"""
:calls: `GET /notifications <http://developer.github.com/v3/activity/notifications>`_
:param all: bool
:param participating: bool
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Notification.Notification`
"""
assert all is github.GithubObject.NotSet or isinstance(all, bool), all
assert participating is github.GithubObject.NotSet or isinstance(participating, bool), participating
params = dict()
if all is not github.GithubObject.NotSet:
params["all"] = all
if participating is not github.GithubObject.NotSet:
params["participating"] = participating
# TODO: implement parameter "since"
return github.PaginatedList.PaginatedList(
github.Notification.Notification,
self._requester,
"/notifications",
params
)
def get_organization_events(self, org):
"""
:calls: `GET /users/:user/events/orgs/:org <http://developer.github.com/v3/activity/events>`_
:param org: :class:`github.Organization.Organization`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
assert isinstance(org, github.Organization.Organization), org
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/users/" + self.login + "/events/orgs/" + org.login,
None
)
def get_orgs(self):
"""
:calls: `GET /user/orgs <http://developer.github.com/v3/orgs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
"""
return github.PaginatedList.PaginatedList(
github.Organization.Organization,
self._requester,
"/user/orgs",
None
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/repos/" + self.login + "/" + name
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(self, type=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
"""
:calls: `GET /user/repos <http://developer.github.com/v3/repos>`_
:param type: string
:param sort: string
:param direction: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert type is github.GithubObject.NotSet or isinstance(type, (str, unicode)), type
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
url_parameters = dict()
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/repos",
url_parameters
)
def get_starred(self):
"""
:calls: `GET /user/starred <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/starred",
None
)
def get_starred_gists(self):
"""
:calls: `GET /gists/starred <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
"/gists/starred",
None
)
def get_subscriptions(self):
"""
:calls: `GET /user/subscriptions <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/subscriptions",
None
)
def get_teams(self):
"""
:calls: `GET /user/teams <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
"/user/teams",
None
)
def get_watched(self):
"""
:calls: `GET /user/watched <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/watched",
None
)
def has_in_following(self, following):
"""
:calls: `GET /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(following, github.NamedUser.NamedUser), following
status, headers, data = self._requester.requestJson(
"GET",
"/user/following/" + following._identity
)
return status == 204
def has_in_starred(self, starred):
"""
:calls: `GET /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(starred, github.Repository.Repository), starred
status, headers, data = self._requester.requestJson(
"GET",
"/user/starred/" + starred._identity
)
return status == 204
def has_in_subscriptions(self, subscription):
"""
:calls: `GET /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(subscription, github.Repository.Repository), subscription
status, headers, data = self._requester.requestJson(
"GET",
"/user/subscriptions/" + subscription._identity
)
return status == 204
def has_in_watched(self, watched):
"""
:calls: `GET /user/watched/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param watched: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(watched, github.Repository.Repository), watched
status, headers, data = self._requester.requestJson(
"GET",
"/user/watched/" + watched._identity
)
return status == 204
def remove_from_emails(self, *emails):
"""
:calls: `DELETE /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, (str, unicode)) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/emails",
input=post_parameters
)
def remove_from_following(self, following):
"""
:calls: `DELETE /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/following/" + following._identity
)
def remove_from_starred(self, starred):
"""
:calls: `DELETE /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(starred, github.Repository.Repository), starred
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/starred/" + starred._identity
)
def remove_from_subscriptions(self, subscription):
"""
:calls: `DELETE /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(subscription, github.Repository.Repository), subscription
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/subscriptions/" + subscription._identity
)
def remove_from_watched(self, watched):
"""
:calls: `DELETE /user/watched/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/watched/" + watched._identity
)
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._bio = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._followers_url = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._following_url = github.GithubObject.NotSet
self._gists_url = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._hireable = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._organizations_url = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._received_events_url = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._site_admin = github.GithubObject.NotSet
self._starred_url = github.GithubObject.NotSet
self._subscriptions_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "bio" in attributes: # pragma no branch
self._bio = self._makeStringAttribute(attributes["bio"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "followers_url" in attributes: # pragma no branch
self._followers_url = self._makeStringAttribute(attributes["followers_url"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "following_url" in attributes: # pragma no branch
self._following_url = self._makeStringAttribute(attributes["following_url"])
if "gists_url" in attributes: # pragma no branch
self._gists_url = self._makeStringAttribute(attributes["gists_url"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "hireable" in attributes: # pragma no branch
self._hireable = self._makeBoolAttribute(attributes["hireable"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "organizations_url" in attributes: # pragma no branch
self._organizations_url = self._makeStringAttribute(attributes["organizations_url"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(attributes["owned_private_repos"])
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "received_events_url" in attributes: # pragma no branch
self._received_events_url = self._makeStringAttribute(attributes["received_events_url"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "site_admin" in attributes: # pragma no branch
self._site_admin = self._makeBoolAttribute(attributes["site_admin"])
if "starred_url" in attributes: # pragma no branch
self._starred_url = self._makeStringAttribute(attributes["starred_url"])
if "subscriptions_url" in attributes: # pragma no branch
self._subscriptions_url = self._makeStringAttribute(attributes["subscriptions_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(attributes["total_private_repos"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 | 9,208,537,430,826,643,000 | 39.430052 | 348 | 0.608698 | false |
fritsvanveen/QGIS | python/plugins/processing/algs/gdal/contour.py | 5 | 3823 | # -*- coding: utf-8 -*-
"""
***************************************************************************
contour.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Alexander Bruy
Email : alexander bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Alexander Bruy'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterString
from processing.core.outputs import OutputVector
from processing.algs.gdal.GdalUtils import GdalUtils
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class contour(GdalAlgorithm):
INPUT_RASTER = 'INPUT_RASTER'
OUTPUT_VECTOR = 'OUTPUT_VECTOR'
INTERVAL = 'INTERVAL'
FIELD_NAME = 'FIELD_NAME'
EXTRA = 'EXTRA'
def getIcon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'contour.png'))
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Contour')
self.group, self.i18n_group = self.trAlgorithm('[GDAL] Extraction')
self.addParameter(ParameterRaster(self.INPUT_RASTER,
self.tr('Input layer'), False))
self.addParameter(ParameterNumber(self.INTERVAL,
self.tr('Interval between contour lines'), 0.0,
99999999.999999, 10.0))
self.addParameter(ParameterString(self.FIELD_NAME,
self.tr('Attribute name (if not set, no elevation attribute is attached)'),
'ELEV', optional=True))
self.addParameter(ParameterString(self.EXTRA,
self.tr('Additional creation parameters'), '', optional=True))
self.addOutput(OutputVector(self.OUTPUT_VECTOR,
self.tr('Contours')))
def getConsoleCommands(self):
output = self.getOutputValue(self.OUTPUT_VECTOR)
interval = str(self.getParameterValue(self.INTERVAL))
fieldName = str(self.getParameterValue(self.FIELD_NAME))
extra = self.getParameterValue(self.EXTRA)
if extra is not None:
extra = str(extra)
arguments = []
if len(fieldName) > 0:
arguments.append('-a')
arguments.append(fieldName)
arguments.append('-i')
arguments.append(interval)
driver = GdalUtils.getVectorDriverFromFileName(output)
arguments.append('-f')
arguments.append(driver)
if extra and len(extra) > 0:
arguments.append(extra)
arguments.append(self.getParameterValue(self.INPUT_RASTER))
arguments.append(output)
return ['gdal_contour', GdalUtils.escapeAndJoin(arguments)]
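# Illustrative note (file names invented for this sketch): with the default
# interval of 10.0, attribute name 'ELEV' and a Shapefile output, the command
# assembled above is roughly:
#   gdal_contour -a ELEV -i 10.0 -f "ESRI Shapefile" input.tif contours.shp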
| gpl-2.0 | -7,472,972,583,677,670,000 | 37.616162 | 117 | 0.551138 | false |
jofomah/rules | testpy/waltzdb.py | 1 | 50347 | from durable.lang import *
import math
import datetime
import json
_fact_count = 0
def create_and_post(host, fact):
global _fact_count
fact['id'] = _fact_count
fact['sid'] = 1
host.post('waltzdb', fact)
_fact_count += 1
def create_and_assert(host, fact):
global _fact_count
fact['id'] = _fact_count
fact['sid'] = 1
host.assert_fact('waltzdb', fact)
_fact_count += 1
def get_x(val):
return math.floor(val / 100)
def get_y(val):
return val % 100
def get_angle(p1, p2):
delta_x = get_x(p2) - get_x(p1)
delta_y = get_y(p2) - get_y(p1)
if delta_x == 0:
if delta_y > 0:
return math.pi / 2
elif delta_y < 0:
return -math.pi / 2
elif delta_y == 0:
if delta_x > 0:
return 0
elif delta_x < 0:
return math.pi
else:
return math.atan2(delta_y, delta_x)
def get_inscribable_angle(base_point, p1, p2):
angle1 = get_angle(base_point, p1)
angle2 = get_angle(base_point, p2)
temp = math.fabs(angle1 - angle2)
if temp > math.pi:
return math.fabs(2 * math.pi - temp)
return temp
def make_3j_junction(j, base_point, p1, p2, p3):
angle12 = get_inscribable_angle(base_point, p1, p2)
angle13 = get_inscribable_angle(base_point, p1, p3)
angle23 = get_inscribable_angle(base_point, p2, p3)
sum1213 = angle12 + angle13
sum1223 = angle12 + angle23
sum1323 = angle13 + angle23
total = 0
if sum1213 < sum1223:
if sum1213 < sum1323:
total = sum1213
j['p2'] = p1; j['p1'] = p2; j['p3'] = p3
else:
total = sum1323
j['p2'] = p3; j['p1'] = p1; j['p3'] = p2
else:
if sum1223 < sum1323:
total = sum1223
j['p2'] = p2; j['p1'] = p1; j['p3'] = p3
else:
total = sum1323
j['p2'] = p3; j['p1'] = p1; j['p3'] = p2
if math.fabs(total - math.pi) < 0.001:
j['name'] = 'tee'
elif total > math.pi:
j['name'] = 'fork'
else:
j['name'] = 'arrow'
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return unix_time(dt) * 1000.0
with statechart('waltzdb'):
with state('start'):
@to('duplicate')
def starting(c):
c.s.gid = 1000
c.s.start_time = unix_time_millis(datetime.datetime.now())
with state('duplicate'):
@to('duplicate')
@when_all(cap(1000),
c.line << m.t == 'line')
def reverse_edges(c):
for frame in c.m:
print('Edge {0} {1}'.format(frame.line.p1, frame.line.p2))
print('Edge {0} {1}'.format(frame.line.p2, frame.line.p1))
c.post({'id': c.s.gid, 't': 'edge', 'p1': frame.line.p1, 'p2': frame.line.p2, 'joined': False})
c.post({'id': c.s.gid + 1, 't': 'edge', 'p1': frame.line.p2, 'p2': frame.line.p1, 'joined': False})
c.s.gid += 2
@to('detect_junctions')
@when_all(pri(1))
def done_reversing(c):
print('detect_junctions')
with state('detect_junctions'):
@to('detect_junctions')
@when_all(cap(1000),
c.e1 << (m.t == 'edge') & (m.joined == False),
c.e2 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2),
c.e3 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2) & (m.p2 != c.e2.p2))
def make_3_junction(c):
for frame in c.m:
j = {'id': c.s.gid, 't': 'junction', 'base_point': frame.e1.p1, 'j_t': '3j', 'visited': 'no'}
make_3j_junction(j, frame.e1.p1, frame.e1.p2, frame.e2.p2, frame.e3.p2)
print('Junction {0} {1} {2} {3} {4}'.format(j['name'], j['base_point'], j['p1'], j['p2'], j['p3']))
c.assert_fact(j)
frame.e1.id = c.s.gid + 1; frame.e1.joined = True; frame.e1.j_t = '3j'; c.assert_fact(frame.e1)
frame.e2.id = c.s.gid + 2; frame.e2.joined = True; frame.e2.j_t = '3j'; c.assert_fact(frame.e2)
frame.e3.id = c.s.gid + 3; frame.e3.joined = True; frame.e3.j_t = '3j'; c.assert_fact(frame.e3)
c.s.gid += 4
@to('detect_junctions')
@when_all(cap(1000),
c.e1 << (m.t == 'edge') & (m.joined == False),
c.e2 << (m.t == 'edge') & (m.joined == False) & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2),
none((m.t == 'edge') & (m.p1 == c.e1.p1) & (m.p2 != c.e1.p2) & (m.p2 != c.e2.p2)))
def make_l(c):
for frame in c.m:
j = {'id': c.s.gid, 't': 'junction', 'base_point': frame.e1.p1, 'j_t': '2j', 'visited': 'no', 'name': 'L', 'p1': frame.e1.p2, 'p2': frame.e2.p2}
print('Junction L {0} {1} {2}'.format(frame.e1.p1, frame.e1.p2, frame.e2.p2))
c.assert_fact(j)
frame.e1.id = c.s.gid + 1; frame.e1.joined = True; frame.e1.j_t = '2j'; c.assert_fact(frame.e1)
frame.e2.id = c.s.gid + 2; frame.e2.joined = True; frame.e2.j_t = '2j'; c.assert_fact(frame.e2)
c.s.gid += 3
@to('find_initial_boundary')
@when_all(pri(1))
def done_detecting(c):
print('find_initial_boundary')
with state('find_initial_boundary'):
@to('find_second_boundary')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
none((m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no') & (m.base_point > c.j.base_point)))
def initial_boundary_junction_l(c):
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '1'})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': 'B', 'lid': '1'})
c.s.gid += 2
print('find_second_boundary')
@to('find_second_boundary')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.name == 'arrow') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
c.e3 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3),
none((m.t == 'junction') & (m.j_t == '3j') & (m.visited == 'no') & (m.base_point > c.j.base_point)))
def initial_boundary_junction_arrow(c):
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': '+', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': 'B', 'lid': '14'})
c.s.gid += 3
print('find_second_boundary')
with state('find_second_boundary'):
@to('labeling')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
none((m.t == 'junction') & (m.visited != 'no') & (m.base_point < c.j.base_point)))
def second_boundary_junction_l(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '1'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': 'B', 'lid': '1'})
c.s.gid += 3
print('labeling')
@to('labeling')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.name == 'arrow') & (m.visited == 'no'),
c.e1 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1),
c.e2 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2),
c.e3 << (m.t == 'edge') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3),
none((m.t == 'junction') & (m.visited != 'no') & (m.base_point < c.j.base_point)))
def second_boundary_junction_arrow(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': 'B', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': '+', 'lid': '14'})
c.assert_fact({'id': c.s.gid + 3, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': 'B', 'lid': '14'})
c.s.gid += 4
print('labeling')
with state('labeling'):
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.visited == 'no'))
def start_visit_3_junction(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'now'; c.assert_fact(c.j)
c.s.gid += 1
print('visiting_3j')
@to('visiting_2j')
@when_all(pri(1),
c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.visited == 'no'))
def start_visit_2_junction(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'now'; c.assert_fact(c.j)
c.s.gid += 1
print('visiting_2j')
@to('end')
@when_all(pri(2))
def done_labeling(c):
print('end {0}'.format(unix_time_millis(datetime.datetime.now()) - c.s.start_time))
with state('visiting_3j'):
def visit_3j(c):
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p1, c.l.n1, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p2, c.l.n2, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p3, c.l.n3, c.l.lid))
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': c.l.n1, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': c.l.n2, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 2, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p3, 'label_name': c.l.n3, 'lid': c.l.lid})
c.s.gid += 3
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_0(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_1(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_2(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n3),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_3(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_4(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_5(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_6(c):
visit_3j(c)
@to('visiting_3j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p3) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_3j_7(c):
visit_3j(c)
@to('marking')
@when_all(pri(1), (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '3j'))
def end_visit(c):
print('marking')
with state('visiting_2j'):
def visit_2j(c):
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p1, c.l.n1, c.l.lid))
print('Edge Label {0} {1} {2} {3}'.format(c.j.base_point, c.j.p2, c.l.n2, c.l.lid))
c.assert_fact({'id': c.s.gid, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p1, 'label_name': c.l.n1, 'lid': c.l.lid})
c.assert_fact({'id': c.s.gid + 1, 't': 'edge_label', 'p1': c.j.base_point, 'p2': c.j.p2, 'label_name': c.l.n2, 'lid': c.l.lid})
c.s.gid += 2
@to('visiting_2j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_2j_0(c):
visit_2j(c)
@to('visiting_2j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n2),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_2j_1(c):
visit_2j(c)
@to('visiting_2j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point) & (m.label_name == c.l.n1),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_2j_2(c):
visit_2j(c)
@to('visiting_2j')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'),
c.l << (m.t == 'label') & (m.name == c.j.name),
none((m.t == 'edge_label') & (m.p1 == c.j.p1) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.p2) & (m.p2 == c.j.base_point)),
none((m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.lid == c.l.lid)))
def visit_2j_3(c):
visit_2j(c)
@to('marking')
@when_all(pri(1), (m.t == 'junction') & (m.visited == 'now') & (m.j_t == '2j'))
def end_visit(c):
print('marking')
with state('marking'):
@to('marking')
@when_all(c.j << (m.t == 'junction') & (m.visited == 'now'),
c.e << (m.t == 'edge') & (m.p2 == c.j.base_point),
c.junction << (m.t == 'junction') & (m.base_point == c.e.p1) & (m.visited == 'yes'))
def marking(c):
c.retract_fact(c.junction); c.junction.id = c.s.gid; c.junction.visited = 'check'; c.assert_fact(c.junction)
c.s.gid += 1
@to('marking')
@when_all(pri(1), c.j << (m.t == 'junction') & (m.visited == 'now'))
def stop_marking(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.s.gid += 1
@to('checking')
@when_all(pri(2))
def start_checking(c):
print('checking')
with state('checking'):
@to('remove_label')
@when_all(c.junction << (m.t == 'junction') & (m.visited == 'check'),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.junction.base_point),
c.j << (m.t == 'junction') & (m.base_point == c.el1.p2) & (m.visited == 'yes'),
none((m.t == 'edge_label') & (m.p1 == c.el1.p2) & (m.p2 == c.junction.base_point) & (m.label_name == c.el1.label_name)))
def checking(c):
print('remove_label')
c.assert_fact({'id': c.s.gid, 't': 'illegal', 'base_point': c.junction.base_point, 'lid': c.el1.lid})
c.s.gid += 1
@to('checking')
@when_all(pri(1), c.j << (m.t == 'junction') & (m.visited == 'check'))
def checking2(c):
c.retract_fact(c.j); c.j.id = c.s.gid; c.j.visited = 'yes'; c.assert_fact(c.j)
c.s.gid += 1
@to('labeling')
@when_all(pri(2))
def stop_checking(c):
print('labeling')
with state('remove_label'):
@to('checking')
@when_all(c.i << (m.t == 'illegal'),
c.j << (m.t == 'junction') & (m.j_t == '3j') & (m.base_point == c.i.base_point),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1) & (m.lid == c.i.lid),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2) & (m.lid == c.i.lid),
c.el3 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p3) & (m.lid == c.i.lid))
def remove_label_3j(c):
print('checking')
c.retract_fact(c.i)
c.retract_fact(c.el1)
c.retract_fact(c.el2)
c.retract_fact(c.el3)
@to('checking')
@when_all(c.i << (m.t == 'illegal'),
c.j << (m.t == 'junction') & (m.j_t == '2j') & (m.base_point == c.i.base_point),
c.el1 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p1) & (m.lid == c.i.lid),
c.el2 << (m.t == 'edge_label') & (m.p1 == c.j.base_point) & (m.p2 == c.j.p2) & (m.lid == c.i.lid))
def remove_edge_2j(c):
print('checking')
c.retract_fact(c.i)
c.retract_fact(c.el1)
c.retract_fact(c.el2)
state('end')
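    # Summary (inferred from the rules above, not part of the original sample):
    # junction facts flow through the statechart roughly as
    #   labeling -> visiting_3j / visiting_2j  (assert candidate edge_label facts
    #                                           for every junction/label pairing)
    #   -> marking    (flag neighbouring junctions so they get re-checked)
    #   -> checking / remove_label  (retract label sets with no consistent
    #                                counterpart on the other end of an edge)
    #   -> end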
@when_start
def start(host):
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'1' ,'n1':'B' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'2' ,'n1':'+' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'3' ,'n1':'B' ,'n2':'+'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'4' ,'n1':'-' ,'n2':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'2j' ,'name':'L' ,'lid':'5' ,'n1':'B' ,'n2':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'6' ,'n1':'+' ,'n2':'+' ,'n3':'+'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'7' ,'n1':'-' ,'n2':'-' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'8' ,'n1':'B' ,'n2':'-' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'9' ,'n1':'-' ,'n2':'B' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'fork' ,'lid':'10' ,'n1':'B' ,'n2':'B' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'11' ,'n1':'B' ,'n2':'+' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'12' ,'n1':'B' ,'n2':'-' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'tee' ,'lid':'13' ,'n1':'B' ,'n2':'B' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'14' ,'n1':'B' ,'n2':'+' ,'n3':'B'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'15' ,'n1':'-' ,'n2':'+' ,'n3':'-'})
create_and_assert(host, {'t':'label' ,'j_t':'3j' ,'name':'arrow' ,'lid':'16' ,'n1':'+' ,'n2':'-' ,'n3':'+'})
create_and_post(host, {'t':'line' ,'p1':50003 ,'p2':60003})
create_and_post(host, {'t':'line' ,'p1':30005 ,'p2':30006})
create_and_post(host, {'t':'line' ,'p1':80005 ,'p2':80006})
create_and_post(host, {'t':'line' ,'p1':50008 ,'p2':60008})
create_and_post(host, {'t':'line' ,'p1':0 ,'p2':20000})
create_and_post(host, {'t':'line' ,'p1':20000 ,'p2':30000})
create_and_post(host, {'t':'line' ,'p1':30000 ,'p2':40000})
create_and_post(host, {'t':'line' ,'p1':0 ,'p2':2})
create_and_post(host, {'t':'line' ,'p1':2 ,'p2':3})
create_and_post(host, {'t':'line' ,'p1':3 ,'p2':4})
create_and_post(host, {'t':'line' ,'p1':4 ,'p2':40004})
create_and_post(host, {'t':'line' ,'p1':40004 ,'p2':40000})
create_and_post(host, {'t':'line' ,'p1':40000 ,'p2':50001})
create_and_post(host, {'t':'line' ,'p1':50001 ,'p2':50002})
create_and_post(host, {'t':'line' ,'p1':50002 ,'p2':50003})
create_and_post(host, {'t':'line' ,'p1':50003 ,'p2':50005})
create_and_post(host, {'t':'line' ,'p1':50005 ,'p2':40004})
create_and_post(host, {'t':'line' ,'p1':50005 ,'p2':30005})
create_and_post(host, {'t':'line' ,'p1':30005 ,'p2':20005})
create_and_post(host, {'t':'line' ,'p1':20005 ,'p2':10005})
create_and_post(host, {'t':'line' ,'p1':10005 ,'p2':4})
create_and_post(host, {'t':'line' ,'p1':60000 ,'p2':80000})
create_and_post(host, {'t':'line' ,'p1':80000 ,'p2':90000})
create_and_post(host, {'t':'line' ,'p1':90000 ,'p2':100000})
create_and_post(host, {'t':'line' ,'p1':60000 ,'p2':60002})
create_and_post(host, {'t':'line' ,'p1':60002 ,'p2':60003})
create_and_post(host, {'t':'line' ,'p1':60003 ,'p2':60004})
create_and_post(host, {'t':'line' ,'p1':60004 ,'p2':100004})
create_and_post(host, {'t':'line' ,'p1':100004 ,'p2':100000})
create_and_post(host, {'t':'line' ,'p1':100000 ,'p2':110001})
create_and_post(host, {'t':'line' ,'p1':110001 ,'p2':110002})
create_and_post(host, {'t':'line' ,'p1':110002 ,'p2':110003})
create_and_post(host, {'t':'line' ,'p1':110003 ,'p2':110005})
create_and_post(host, {'t':'line' ,'p1':110005 ,'p2':100004})
create_and_post(host, {'t':'line' ,'p1':110005 ,'p2':90005})
create_and_post(host, {'t':'line' ,'p1':90005 ,'p2':80005})
create_and_post(host, {'t':'line' ,'p1':80005 ,'p2':70005})
create_and_post(host, {'t':'line' ,'p1':70005 ,'p2':60004})
create_and_post(host, {'t':'line' ,'p1':6 ,'p2':20006})
create_and_post(host, {'t':'line' ,'p1':20006 ,'p2':30006})
create_and_post(host, {'t':'line' ,'p1':30006 ,'p2':40006})
create_and_post(host, {'t':'line' ,'p1':6 ,'p2':8})
create_and_post(host, {'t':'line' ,'p1':8 ,'p2':9})
create_and_post(host, {'t':'line' ,'p1':9 ,'p2':10})
create_and_post(host, {'t':'line' ,'p1':10 ,'p2':40010})
create_and_post(host, {'t':'line' ,'p1':40010 ,'p2':40006})
create_and_post(host, {'t':'line' ,'p1':40006 ,'p2':50007})
create_and_post(host, {'t':'line' ,'p1':50007 ,'p2':50008})
create_and_post(host, {'t':'line' ,'p1':50008 ,'p2':50009})
create_and_post(host, {'t':'line' ,'p1':50009 ,'p2':50011})
create_and_post(host, {'t':'line' ,'p1':50011 ,'p2':40010})
create_and_post(host, {'t':'line' ,'p1':50011 ,'p2':30011})
create_and_post(host, {'t':'line' ,'p1':30011 ,'p2':20011})
create_and_post(host, {'t':'line' ,'p1':20011 ,'p2':10011})
create_and_post(host, {'t':'line' ,'p1':10011 ,'p2':10})
create_and_post(host, {'t':'line' ,'p1':60006 ,'p2':80006})
create_and_post(host, {'t':'line' ,'p1':80006 ,'p2':90006})
create_and_post(host, {'t':'line' ,'p1':90006 ,'p2':100006})
create_and_post(host, {'t':'line' ,'p1':60006 ,'p2':60008})
create_and_post(host, {'t':'line' ,'p1':60008 ,'p2':60009})
create_and_post(host, {'t':'line' ,'p1':60009 ,'p2':60010})
create_and_post(host, {'t':'line' ,'p1':60010 ,'p2':100010})
create_and_post(host, {'t':'line' ,'p1':100010 ,'p2':100006})
create_and_post(host, {'t':'line' ,'p1':100006 ,'p2':110007})
create_and_post(host, {'t':'line' ,'p1':110007 ,'p2':110008})
create_and_post(host, {'t':'line' ,'p1':110008 ,'p2':110009})
create_and_post(host, {'t':'line' ,'p1':110009 ,'p2':110011})
create_and_post(host, {'t':'line' ,'p1':110011 ,'p2':100010})
create_and_post(host, {'t':'line' ,'p1':110011 ,'p2':90011})
create_and_post(host, {'t':'line' ,'p1':90011 ,'p2':80011})
create_and_post(host, {'t':'line' ,'p1':80011 ,'p2':70011})
create_and_post(host, {'t':'line' ,'p1':70011 ,'p2':60010})
create_and_post(host, {'t':'line' ,'p1':170003 ,'p2':180003})
create_and_post(host, {'t':'line' ,'p1':150005 ,'p2':150006})
create_and_post(host, {'t':'line' ,'p1':200005 ,'p2':200006})
create_and_post(host, {'t':'line' ,'p1':170008 ,'p2':180008})
create_and_post(host, {'t':'line' ,'p1':120000 ,'p2':140000})
create_and_post(host, {'t':'line' ,'p1':140000 ,'p2':150000})
create_and_post(host, {'t':'line' ,'p1':150000 ,'p2':160000})
create_and_post(host, {'t':'line' ,'p1':120000 ,'p2':120002})
create_and_post(host, {'t':'line' ,'p1':120002 ,'p2':120003})
create_and_post(host, {'t':'line' ,'p1':120003 ,'p2':120004})
create_and_post(host, {'t':'line' ,'p1':120004 ,'p2':160004})
create_and_post(host, {'t':'line' ,'p1':160004 ,'p2':160000})
create_and_post(host, {'t':'line' ,'p1':160000 ,'p2':170001})
create_and_post(host, {'t':'line' ,'p1':170001 ,'p2':170002})
create_and_post(host, {'t':'line' ,'p1':170002 ,'p2':170003})
create_and_post(host, {'t':'line' ,'p1':170003 ,'p2':170005})
create_and_post(host, {'t':'line' ,'p1':170005 ,'p2':160004})
create_and_post(host, {'t':'line' ,'p1':170005 ,'p2':150005})
create_and_post(host, {'t':'line' ,'p1':150005 ,'p2':140005})
create_and_post(host, {'t':'line' ,'p1':140005 ,'p2':130005})
create_and_post(host, {'t':'line' ,'p1':130005 ,'p2':120004})
create_and_post(host, {'t':'line' ,'p1':180000 ,'p2':200000})
create_and_post(host, {'t':'line' ,'p1':200000 ,'p2':210000})
create_and_post(host, {'t':'line' ,'p1':210000 ,'p2':220000})
create_and_post(host, {'t':'line' ,'p1':180000 ,'p2':180002})
create_and_post(host, {'t':'line' ,'p1':180002 ,'p2':180003})
create_and_post(host, {'t':'line' ,'p1':180003 ,'p2':180004})
create_and_post(host, {'t':'line' ,'p1':180004 ,'p2':220004})
create_and_post(host, {'t':'line' ,'p1':220004 ,'p2':220000})
create_and_post(host, {'t':'line' ,'p1':220000 ,'p2':230001})
create_and_post(host, {'t':'line' ,'p1':230001 ,'p2':230002})
create_and_post(host, {'t':'line' ,'p1':230002 ,'p2':230003})
create_and_post(host, {'t':'line' ,'p1':230003 ,'p2':230005})
create_and_post(host, {'t':'line' ,'p1':230005 ,'p2':220004})
create_and_post(host, {'t':'line' ,'p1':230005 ,'p2':210005})
create_and_post(host, {'t':'line' ,'p1':210005 ,'p2':200005})
create_and_post(host, {'t':'line' ,'p1':200005 ,'p2':190005})
create_and_post(host, {'t':'line' ,'p1':190005 ,'p2':180004})
create_and_post(host, {'t':'line' ,'p1':120006 ,'p2':140006})
create_and_post(host, {'t':'line' ,'p1':140006 ,'p2':150006})
create_and_post(host, {'t':'line' ,'p1':150006 ,'p2':160006})
create_and_post(host, {'t':'line' ,'p1':120006 ,'p2':120008})
create_and_post(host, {'t':'line' ,'p1':120008 ,'p2':120009})
create_and_post(host, {'t':'line' ,'p1':120009 ,'p2':120010})
create_and_post(host, {'t':'line' ,'p1':120010 ,'p2':160010})
create_and_post(host, {'t':'line' ,'p1':160010 ,'p2':160006})
create_and_post(host, {'t':'line' ,'p1':160006 ,'p2':170007})
create_and_post(host, {'t':'line' ,'p1':170007 ,'p2':170008})
create_and_post(host, {'t':'line' ,'p1':170008 ,'p2':170009})
create_and_post(host, {'t':'line' ,'p1':170009 ,'p2':170011})
create_and_post(host, {'t':'line' ,'p1':170011 ,'p2':160010})
create_and_post(host, {'t':'line' ,'p1':170011 ,'p2':150011})
create_and_post(host, {'t':'line' ,'p1':150011 ,'p2':140011})
create_and_post(host, {'t':'line' ,'p1':140011 ,'p2':130011})
create_and_post(host, {'t':'line' ,'p1':130011 ,'p2':120010})
create_and_post(host, {'t':'line' ,'p1':180006 ,'p2':200006})
create_and_post(host, {'t':'line' ,'p1':200006 ,'p2':210006})
create_and_post(host, {'t':'line' ,'p1':210006 ,'p2':220006})
create_and_post(host, {'t':'line' ,'p1':180006 ,'p2':180008})
create_and_post(host, {'t':'line' ,'p1':180008 ,'p2':180009})
create_and_post(host, {'t':'line' ,'p1':180009 ,'p2':180010})
create_and_post(host, {'t':'line' ,'p1':180010 ,'p2':220010})
create_and_post(host, {'t':'line' ,'p1':220010 ,'p2':220006})
create_and_post(host, {'t':'line' ,'p1':220006 ,'p2':230007})
create_and_post(host, {'t':'line' ,'p1':230007 ,'p2':230008})
create_and_post(host, {'t':'line' ,'p1':230008 ,'p2':230009})
create_and_post(host, {'t':'line' ,'p1':230009 ,'p2':230011})
create_and_post(host, {'t':'line' ,'p1':230011 ,'p2':220010})
create_and_post(host, {'t':'line' ,'p1':230011 ,'p2':210011})
create_and_post(host, {'t':'line' ,'p1':210011 ,'p2':200011})
create_and_post(host, {'t':'line' ,'p1':200011 ,'p2':190011})
create_and_post(host, {'t':'line' ,'p1':190011 ,'p2':180010})
create_and_post(host, {'t':'line' ,'p1':110003 ,'p2':120003})
create_and_post(host, {'t':'line' ,'p1':90005 ,'p2':90006})
create_and_post(host, {'t':'line' ,'p1':140005 ,'p2':140006})
create_and_post(host, {'t':'line' ,'p1':110008 ,'p2':120008})
create_and_post(host, {'t':'line' ,'p1':290003 ,'p2':300003})
create_and_post(host, {'t':'line' ,'p1':270005 ,'p2':270006})
create_and_post(host, {'t':'line' ,'p1':320005 ,'p2':320006})
create_and_post(host, {'t':'line' ,'p1':290008 ,'p2':300008})
create_and_post(host, {'t':'line' ,'p1':240000 ,'p2':260000})
create_and_post(host, {'t':'line' ,'p1':260000 ,'p2':270000})
create_and_post(host, {'t':'line' ,'p1':270000 ,'p2':280000})
create_and_post(host, {'t':'line' ,'p1':240000 ,'p2':240002})
create_and_post(host, {'t':'line' ,'p1':240002 ,'p2':240003})
create_and_post(host, {'t':'line' ,'p1':240003 ,'p2':240004})
create_and_post(host, {'t':'line' ,'p1':240004 ,'p2':280004})
create_and_post(host, {'t':'line' ,'p1':280004 ,'p2':280000})
create_and_post(host, {'t':'line' ,'p1':280000 ,'p2':290001})
create_and_post(host, {'t':'line' ,'p1':290001 ,'p2':290002})
create_and_post(host, {'t':'line' ,'p1':290002 ,'p2':290003})
create_and_post(host, {'t':'line' ,'p1':290003 ,'p2':290005})
create_and_post(host, {'t':'line' ,'p1':290005 ,'p2':280004})
create_and_post(host, {'t':'line' ,'p1':290005 ,'p2':270005})
create_and_post(host, {'t':'line' ,'p1':270005 ,'p2':260005})
create_and_post(host, {'t':'line' ,'p1':260005 ,'p2':250005})
create_and_post(host, {'t':'line' ,'p1':250005 ,'p2':240004})
create_and_post(host, {'t':'line' ,'p1':300000 ,'p2':320000})
create_and_post(host, {'t':'line' ,'p1':320000 ,'p2':330000})
create_and_post(host, {'t':'line' ,'p1':330000 ,'p2':340000})
create_and_post(host, {'t':'line' ,'p1':300000 ,'p2':300002})
create_and_post(host, {'t':'line' ,'p1':300002 ,'p2':300003})
create_and_post(host, {'t':'line' ,'p1':300003 ,'p2':300004})
create_and_post(host, {'t':'line' ,'p1':300004 ,'p2':340004})
create_and_post(host, {'t':'line' ,'p1':340004 ,'p2':340000})
create_and_post(host, {'t':'line' ,'p1':340000 ,'p2':350001})
create_and_post(host, {'t':'line' ,'p1':350001 ,'p2':350002})
create_and_post(host, {'t':'line' ,'p1':350002 ,'p2':350003})
create_and_post(host, {'t':'line' ,'p1':350003 ,'p2':350005})
create_and_post(host, {'t':'line' ,'p1':350005 ,'p2':340004})
create_and_post(host, {'t':'line' ,'p1':350005 ,'p2':330005})
create_and_post(host, {'t':'line' ,'p1':330005 ,'p2':320005})
create_and_post(host, {'t':'line' ,'p1':320005 ,'p2':310005})
create_and_post(host, {'t':'line' ,'p1':310005 ,'p2':300004})
create_and_post(host, {'t':'line' ,'p1':240006 ,'p2':260006})
create_and_post(host, {'t':'line' ,'p1':260006 ,'p2':270006})
create_and_post(host, {'t':'line' ,'p1':270006 ,'p2':280006})
create_and_post(host, {'t':'line' ,'p1':240006 ,'p2':240008})
create_and_post(host, {'t':'line' ,'p1':240008 ,'p2':240009})
create_and_post(host, {'t':'line' ,'p1':240009 ,'p2':240010})
create_and_post(host, {'t':'line' ,'p1':240010 ,'p2':280010})
create_and_post(host, {'t':'line' ,'p1':280010 ,'p2':280006})
create_and_post(host, {'t':'line' ,'p1':280006 ,'p2':290007})
create_and_post(host, {'t':'line' ,'p1':290007 ,'p2':290008})
create_and_post(host, {'t':'line' ,'p1':290008 ,'p2':290009})
create_and_post(host, {'t':'line' ,'p1':290009 ,'p2':290011})
create_and_post(host, {'t':'line' ,'p1':290011 ,'p2':280010})
create_and_post(host, {'t':'line' ,'p1':290011 ,'p2':270011})
create_and_post(host, {'t':'line' ,'p1':270011 ,'p2':260011})
create_and_post(host, {'t':'line' ,'p1':260011 ,'p2':250011})
create_and_post(host, {'t':'line' ,'p1':250011 ,'p2':240010})
create_and_post(host, {'t':'line' ,'p1':300006 ,'p2':320006})
create_and_post(host, {'t':'line' ,'p1':320006 ,'p2':330006})
create_and_post(host, {'t':'line' ,'p1':330006 ,'p2':340006})
create_and_post(host, {'t':'line' ,'p1':300006 ,'p2':300008})
create_and_post(host, {'t':'line' ,'p1':300008 ,'p2':300009})
create_and_post(host, {'t':'line' ,'p1':300009 ,'p2':300010})
create_and_post(host, {'t':'line' ,'p1':300010 ,'p2':340010})
create_and_post(host, {'t':'line' ,'p1':340010 ,'p2':340006})
create_and_post(host, {'t':'line' ,'p1':340006 ,'p2':350007})
create_and_post(host, {'t':'line' ,'p1':350007 ,'p2':350008})
create_and_post(host, {'t':'line' ,'p1':350008 ,'p2':350009})
create_and_post(host, {'t':'line' ,'p1':350009 ,'p2':350011})
create_and_post(host, {'t':'line' ,'p1':350011 ,'p2':340010})
create_and_post(host, {'t':'line' ,'p1':350011 ,'p2':330011})
create_and_post(host, {'t':'line' ,'p1':330011 ,'p2':320011})
create_and_post(host, {'t':'line' ,'p1':320011 ,'p2':310011})
create_and_post(host, {'t':'line' ,'p1':310011 ,'p2':300010})
create_and_post(host, {'t':'line' ,'p1':230003 ,'p2':240003})
create_and_post(host, {'t':'line' ,'p1':210005 ,'p2':210006})
create_and_post(host, {'t':'line' ,'p1':260005 ,'p2':260006})
create_and_post(host, {'t':'line' ,'p1':230008 ,'p2':240008})
create_and_post(host, {'t':'line' ,'p1':410003 ,'p2':420003})
create_and_post(host, {'t':'line' ,'p1':390005 ,'p2':390006})
create_and_post(host, {'t':'line' ,'p1':440005 ,'p2':440006})
create_and_post(host, {'t':'line' ,'p1':410008 ,'p2':420008})
create_and_post(host, {'t':'line' ,'p1':360000 ,'p2':380000})
create_and_post(host, {'t':'line' ,'p1':380000 ,'p2':390000})
create_and_post(host, {'t':'line' ,'p1':390000 ,'p2':400000})
create_and_post(host, {'t':'line' ,'p1':360000 ,'p2':360002})
create_and_post(host, {'t':'line' ,'p1':360002 ,'p2':360003})
create_and_post(host, {'t':'line' ,'p1':360003 ,'p2':360004})
create_and_post(host, {'t':'line' ,'p1':360004 ,'p2':400004})
create_and_post(host, {'t':'line' ,'p1':400004 ,'p2':400000})
create_and_post(host, {'t':'line' ,'p1':400000 ,'p2':410001})
create_and_post(host, {'t':'line' ,'p1':410001 ,'p2':410002})
create_and_post(host, {'t':'line' ,'p1':410002 ,'p2':410003})
create_and_post(host, {'t':'line' ,'p1':410003 ,'p2':410005})
create_and_post(host, {'t':'line' ,'p1':410005 ,'p2':400004})
create_and_post(host, {'t':'line' ,'p1':410005 ,'p2':390005})
create_and_post(host, {'t':'line' ,'p1':390005 ,'p2':380005})
create_and_post(host, {'t':'line' ,'p1':380005 ,'p2':370005})
create_and_post(host, {'t':'line' ,'p1':370005 ,'p2':360004})
create_and_post(host, {'t':'line' ,'p1':420000 ,'p2':440000})
create_and_post(host, {'t':'line' ,'p1':440000 ,'p2':450000})
create_and_post(host, {'t':'line' ,'p1':450000 ,'p2':460000})
create_and_post(host, {'t':'line' ,'p1':420000 ,'p2':420002})
create_and_post(host, {'t':'line' ,'p1':420002 ,'p2':420003})
create_and_post(host, {'t':'line' ,'p1':420003 ,'p2':420004})
create_and_post(host, {'t':'line' ,'p1':420004 ,'p2':460004})
create_and_post(host, {'t':'line' ,'p1':460004 ,'p2':460000})
create_and_post(host, {'t':'line' ,'p1':460000 ,'p2':470001})
create_and_post(host, {'t':'line' ,'p1':470001 ,'p2':470002})
create_and_post(host, {'t':'line' ,'p1':470002 ,'p2':470003})
create_and_post(host, {'t':'line' ,'p1':470003 ,'p2':470005})
create_and_post(host, {'t':'line' ,'p1':470005 ,'p2':460004})
create_and_post(host, {'t':'line' ,'p1':470005 ,'p2':450005})
create_and_post(host, {'t':'line' ,'p1':450005 ,'p2':440005})
create_and_post(host, {'t':'line' ,'p1':440005 ,'p2':430005})
create_and_post(host, {'t':'line' ,'p1':430005 ,'p2':420004})
create_and_post(host, {'t':'line' ,'p1':360006 ,'p2':380006})
create_and_post(host, {'t':'line' ,'p1':380006 ,'p2':390006})
create_and_post(host, {'t':'line' ,'p1':390006 ,'p2':400006})
create_and_post(host, {'t':'line' ,'p1':360006 ,'p2':360008})
create_and_post(host, {'t':'line' ,'p1':360008 ,'p2':360009})
create_and_post(host, {'t':'line' ,'p1':360009 ,'p2':360010})
create_and_post(host, {'t':'line' ,'p1':360010 ,'p2':400010})
create_and_post(host, {'t':'line' ,'p1':400010 ,'p2':400006})
create_and_post(host, {'t':'line' ,'p1':400006 ,'p2':410007})
create_and_post(host, {'t':'line' ,'p1':410007 ,'p2':410008})
create_and_post(host, {'t':'line' ,'p1':410008 ,'p2':410009})
create_and_post(host, {'t':'line' ,'p1':410009 ,'p2':410011})
create_and_post(host, {'t':'line' ,'p1':410011 ,'p2':400010})
create_and_post(host, {'t':'line' ,'p1':410011 ,'p2':390011})
create_and_post(host, {'t':'line' ,'p1':390011 ,'p2':380011})
create_and_post(host, {'t':'line' ,'p1':380011 ,'p2':370011})
create_and_post(host, {'t':'line' ,'p1':370011 ,'p2':360010})
create_and_post(host, {'t':'line' ,'p1':420006 ,'p2':440006})
create_and_post(host, {'t':'line' ,'p1':440006 ,'p2':450006})
create_and_post(host, {'t':'line' ,'p1':450006 ,'p2':460006})
create_and_post(host, {'t':'line' ,'p1':420006 ,'p2':420008})
create_and_post(host, {'t':'line' ,'p1':420008 ,'p2':420009})
create_and_post(host, {'t':'line' ,'p1':420009 ,'p2':420010})
create_and_post(host, {'t':'line' ,'p1':420010 ,'p2':460010})
create_and_post(host, {'t':'line' ,'p1':460010 ,'p2':460006})
create_and_post(host, {'t':'line' ,'p1':460006 ,'p2':470007})
create_and_post(host, {'t':'line' ,'p1':470007 ,'p2':470008})
create_and_post(host, {'t':'line' ,'p1':470008 ,'p2':470009})
create_and_post(host, {'t':'line' ,'p1':470009 ,'p2':470011})
create_and_post(host, {'t':'line' ,'p1':470011 ,'p2':460010})
create_and_post(host, {'t':'line' ,'p1':470011 ,'p2':450011})
create_and_post(host, {'t':'line' ,'p1':450011 ,'p2':440011})
create_and_post(host, {'t':'line' ,'p1':440011 ,'p2':430011})
create_and_post(host, {'t':'line' ,'p1':430011 ,'p2':420010})
create_and_post(host, {'t':'line' ,'p1':350003 ,'p2':360003})
create_and_post(host, {'t':'line' ,'p1':330005 ,'p2':330006})
create_and_post(host, {'t':'line' ,'p1':380005 ,'p2':380006})
create_and_post(host, {'t':'line' ,'p1':350008 ,'p2':360008})
create_and_post(host, {'t':'line' ,'p1':530003 ,'p2':540003})
create_and_post(host, {'t':'line' ,'p1':510005 ,'p2':510006})
create_and_post(host, {'t':'line' ,'p1':560005 ,'p2':560006})
create_and_post(host, {'t':'line' ,'p1':530008 ,'p2':540008})
create_and_post(host, {'t':'line' ,'p1':480000 ,'p2':500000})
create_and_post(host, {'t':'line' ,'p1':500000 ,'p2':510000})
create_and_post(host, {'t':'line' ,'p1':510000 ,'p2':520000})
create_and_post(host, {'t':'line' ,'p1':480000 ,'p2':480002})
create_and_post(host, {'t':'line' ,'p1':480002 ,'p2':480003})
create_and_post(host, {'t':'line' ,'p1':480003 ,'p2':480004})
create_and_post(host, {'t':'line' ,'p1':480004 ,'p2':520004})
create_and_post(host, {'t':'line' ,'p1':520004 ,'p2':520000})
create_and_post(host, {'t':'line' ,'p1':520000 ,'p2':530001})
create_and_post(host, {'t':'line' ,'p1':530001 ,'p2':530002})
create_and_post(host, {'t':'line' ,'p1':530002 ,'p2':530003})
create_and_post(host, {'t':'line' ,'p1':530003 ,'p2':530005})
create_and_post(host, {'t':'line' ,'p1':530005 ,'p2':520004})
create_and_post(host, {'t':'line' ,'p1':530005 ,'p2':510005})
create_and_post(host, {'t':'line' ,'p1':510005 ,'p2':500005})
create_and_post(host, {'t':'line' ,'p1':500005 ,'p2':490005})
create_and_post(host, {'t':'line' ,'p1':490005 ,'p2':480004})
create_and_post(host, {'t':'line' ,'p1':540000 ,'p2':560000})
create_and_post(host, {'t':'line' ,'p1':560000 ,'p2':570000})
create_and_post(host, {'t':'line' ,'p1':570000 ,'p2':580000})
create_and_post(host, {'t':'line' ,'p1':540000 ,'p2':540002})
create_and_post(host, {'t':'line' ,'p1':540002 ,'p2':540003})
create_and_post(host, {'t':'line' ,'p1':540003 ,'p2':540004})
create_and_post(host, {'t':'line' ,'p1':540004 ,'p2':580004})
create_and_post(host, {'t':'line' ,'p1':580004 ,'p2':580000})
create_and_post(host, {'t':'line' ,'p1':580000 ,'p2':590001})
create_and_post(host, {'t':'line' ,'p1':590001 ,'p2':590002})
create_and_post(host, {'t':'line' ,'p1':590002 ,'p2':590003})
create_and_post(host, {'t':'line' ,'p1':590003 ,'p2':590005})
create_and_post(host, {'t':'line' ,'p1':590005 ,'p2':580004})
create_and_post(host, {'t':'line' ,'p1':590005 ,'p2':570005})
create_and_post(host, {'t':'line' ,'p1':570005 ,'p2':560005})
create_and_post(host, {'t':'line' ,'p1':560005 ,'p2':550005})
create_and_post(host, {'t':'line' ,'p1':550005 ,'p2':540004})
create_and_post(host, {'t':'line' ,'p1':480006 ,'p2':500006})
create_and_post(host, {'t':'line' ,'p1':500006 ,'p2':510006})
create_and_post(host, {'t':'line' ,'p1':510006 ,'p2':520006})
create_and_post(host, {'t':'line' ,'p1':480006 ,'p2':480008})
create_and_post(host, {'t':'line' ,'p1':480008 ,'p2':480009})
create_and_post(host, {'t':'line' ,'p1':480009 ,'p2':480010})
create_and_post(host, {'t':'line' ,'p1':480010 ,'p2':520010})
create_and_post(host, {'t':'line' ,'p1':520010 ,'p2':520006})
create_and_post(host, {'t':'line' ,'p1':520006 ,'p2':530007})
create_and_post(host, {'t':'line' ,'p1':530007 ,'p2':530008})
create_and_post(host, {'t':'line' ,'p1':530008 ,'p2':530009})
create_and_post(host, {'t':'line' ,'p1':530009 ,'p2':530011})
create_and_post(host, {'t':'line' ,'p1':530011 ,'p2':520010})
create_and_post(host, {'t':'line' ,'p1':530011 ,'p2':510011})
create_and_post(host, {'t':'line' ,'p1':510011 ,'p2':500011})
create_and_post(host, {'t':'line' ,'p1':500011 ,'p2':490011})
create_and_post(host, {'t':'line' ,'p1':490011 ,'p2':480010})
create_and_post(host, {'t':'line' ,'p1':540006 ,'p2':560006})
create_and_post(host, {'t':'line' ,'p1':560006 ,'p2':570006})
create_and_post(host, {'t':'line' ,'p1':570006 ,'p2':580006})
create_and_post(host, {'t':'line' ,'p1':540006 ,'p2':540008})
create_and_post(host, {'t':'line' ,'p1':540008 ,'p2':540009})
create_and_post(host, {'t':'line' ,'p1':540009 ,'p2':540010})
create_and_post(host, {'t':'line' ,'p1':540010 ,'p2':580010})
create_and_post(host, {'t':'line' ,'p1':580010 ,'p2':580006})
create_and_post(host, {'t':'line' ,'p1':580006 ,'p2':590007})
create_and_post(host, {'t':'line' ,'p1':590007 ,'p2':590008})
create_and_post(host, {'t':'line' ,'p1':590008 ,'p2':590009})
create_and_post(host, {'t':'line' ,'p1':590009 ,'p2':590011})
create_and_post(host, {'t':'line' ,'p1':590011 ,'p2':580010})
create_and_post(host, {'t':'line' ,'p1':590011 ,'p2':570011})
create_and_post(host, {'t':'line' ,'p1':570011 ,'p2':560011})
create_and_post(host, {'t':'line' ,'p1':560011 ,'p2':550011})
create_and_post(host, {'t':'line' ,'p1':550011 ,'p2':540010})
create_and_post(host, {'t':'line' ,'p1':470003 ,'p2':480003})
create_and_post(host, {'t':'line' ,'p1':450005 ,'p2':450006})
create_and_post(host, {'t':'line' ,'p1':500005 ,'p2':500006})
create_and_post(host, {'t':'line' ,'p1':470008 ,'p2':480008})
run_all(['/tmp/redis0.sock'])
| mit | -7,531,070,431,205,708,000 | 59.440576 | 160 | 0.491708 | false |
Leila20/django | tests/check_framework/test_security.py | 17 | 19560 | from django.conf import settings
from django.core.checks.security import base, csrf, sessions
from django.core.checks.utils import patch_middleware_message
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckSessionCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_secure
return check_session_cookie_secure
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[])
def test_session_cookie_secure_with_installed_app(self):
"""
Warn if SESSION_COOKIE_SECURE is off and "django.contrib.sessions" is
in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=None,
MIDDLEWARE_CLASSES=[])
def test_session_cookie_secure_with_installed_app_middleware_classes(self):
self.assertEqual(self.func(None), [sessions.W010])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_with_middleware(self):
"""
Warn if SESSION_COOKIE_SECURE is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE.
"""
self.assertEqual(self.func(None), [sessions.W011])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=[],
MIDDLEWARE=None,
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_with_middleware_middleware_classes(self):
self.assertEqual(self.func(None), [patch_middleware_message(sessions.W011)])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_both(self):
"""
If SESSION_COOKIE_SECURE is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=None,
MIDDLEWARE_CLASSES=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_both_middleware_classes(self):
self.assertEqual(self.func(None), [sessions.W012])
@override_settings(
SESSION_COOKIE_SECURE=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_secure_true(self):
"""
If SESSION_COOKIE_SECURE is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckSessionCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.sessions import check_session_cookie_httponly
return check_session_cookie_httponly
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=[])
def test_session_cookie_httponly_with_installed_app(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and "django.contrib.sessions"
is in INSTALLED_APPS.
"""
self.assertEqual(self.func(None), [sessions.W013])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=[],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_with_middleware(self):
"""
Warn if SESSION_COOKIE_HTTPONLY is off and
"django.contrib.sessions.middleware.SessionMiddleware" is in
MIDDLEWARE.
"""
self.assertEqual(self.func(None), [sessions.W014])
@override_settings(
SESSION_COOKIE_HTTPONLY=False,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_both(self):
"""
If SESSION_COOKIE_HTTPONLY is off and we find both the session app and
the middleware, provide one common warning.
"""
self.assertEqual(self.func(None), [sessions.W015])
@override_settings(
SESSION_COOKIE_HTTPONLY=True,
INSTALLED_APPS=["django.contrib.sessions"],
MIDDLEWARE=["django.contrib.sessions.middleware.SessionMiddleware"])
def test_session_cookie_httponly_true(self):
"""
If SESSION_COOKIE_HTTPONLY is on, there's no warning about it.
"""
self.assertEqual(self.func(None), [])
class CheckCSRFMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_middleware
return check_csrf_middleware
@override_settings(MIDDLEWARE=[], MIDDLEWARE_CLASSES=[])
def test_no_csrf_middleware(self):
"""
Warn if CsrfViewMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [csrf.W003])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"])
def test_with_csrf_middleware(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieSecureTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_secure
return check_csrf_cookie_secure
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE but
CSRF_COOKIE_SECURE isn't True.
"""
self.assertEqual(self.func(None), [csrf.W016])
@override_settings(MIDDLEWARE=[], MIDDLEWARE_CLASSES=[], CSRF_COOKIE_SECURE=False)
def test_with_csrf_cookie_secure_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE, even if
CSRF_COOKIE_SECURE is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_SECURE=True)
def test_with_csrf_cookie_secure_true(self):
self.assertEqual(self.func(None), [])
class CheckCSRFCookieHttpOnlyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.csrf import check_csrf_cookie_httponly
return check_csrf_cookie_httponly
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false(self):
"""
Warn if CsrfViewMiddleware is in MIDDLEWARE but
CSRF_COOKIE_HTTPONLY isn't True.
"""
self.assertEqual(self.func(None), [csrf.W017])
@override_settings(MIDDLEWARE=[], MIDDLEWARE_CLASSES=[], CSRF_COOKIE_HTTPONLY=False)
def test_with_csrf_cookie_httponly_false_no_middleware(self):
"""
No warning if CsrfViewMiddleware isn't in MIDDLEWARE, even if
CSRF_COOKIE_HTTPONLY is False.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
CSRF_COOKIE_HTTPONLY=True)
def test_with_csrf_cookie_httponly_true(self):
self.assertEqual(self.func(None), [])
class CheckSecurityMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_security_middleware
return check_security_middleware
@override_settings(MIDDLEWARE=[])
def test_no_security_middleware(self):
"""
Warn if SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [base.W001])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"])
def test_with_security_middleware(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecurityTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts
return check_sts
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=0)
def test_no_sts(self):
"""
Warn if SECURE_HSTS_SECONDS isn't > 0.
"""
self.assertEqual(self.func(None), [base.W004])
@override_settings(
MIDDLEWARE=[],
SECURE_HSTS_SECONDS=0)
def test_no_sts_no_middleware(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't > 0 and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_SECONDS=3600)
def test_with_sts(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecuritySubdomainsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts_include_subdomains
return check_sts_include_subdomains
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains(self):
"""
Warn if SECURE_HSTS_INCLUDE_SUBDOMAINS isn't True.
"""
self.assertEqual(self.func(None), [base.W005])
@override_settings(
MIDDLEWARE=[],
SECURE_HSTS_INCLUDE_SUBDOMAINS=False,
SECURE_HSTS_SECONDS=3600)
def test_no_sts_subdomains_no_middleware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None)
def test_no_sts_subdomains_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
SECURE_HSTS_SECONDS=3600)
def test_with_sts_subdomains(self):
self.assertEqual(self.func(None), [])
class CheckStrictTransportSecurityPreloadTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_sts_preload
return check_sts_preload
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_PRELOAD=False,
SECURE_HSTS_SECONDS=3600,
)
def test_no_sts_preload(self):
"""
Warn if SECURE_HSTS_PRELOAD isn't True.
"""
self.assertEqual(self.func(None), [base.W021])
@override_settings(MIDDLEWARE=[], SECURE_HSTS_PRELOAD=False, SECURE_HSTS_SECONDS=3600)
def test_no_sts_preload_no_middleware(self):
"""
Don't warn if SecurityMiddleware isn't installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False,
SECURE_HSTS_SECONDS=None,
)
def test_no_sts_preload_no_seconds(self):
"""
Don't warn if SECURE_HSTS_SECONDS isn't set.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_HSTS_PRELOAD=True,
SECURE_HSTS_SECONDS=3600,
)
def test_with_sts_preload(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsMiddlewareTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_options_middleware
return check_xframe_options_middleware
@override_settings(MIDDLEWARE=[])
def test_middleware_not_installed(self):
"""
Warn if XFrameOptionsMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [base.W002])
@override_settings(MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"])
def test_middleware_installed(self):
self.assertEqual(self.func(None), [])
class CheckXFrameOptionsDenyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xframe_deny
return check_xframe_deny
@override_settings(
MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='SAMEORIGIN',
)
def test_x_frame_options_not_deny(self):
"""
Warn if XFrameOptionsMiddleware is in MIDDLEWARE but
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [base.W019])
@override_settings(MIDDLEWARE=[], X_FRAME_OPTIONS='SAMEORIGIN')
def test_middleware_not_installed(self):
"""
No error if XFrameOptionsMiddleware isn't in MIDDLEWARE even if
X_FRAME_OPTIONS isn't 'DENY'.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.clickjacking.XFrameOptionsMiddleware"],
X_FRAME_OPTIONS='DENY',
)
def test_xframe_deny(self):
self.assertEqual(self.func(None), [])
class CheckContentTypeNosniffTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_content_type_nosniff
return check_content_type_nosniff
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff(self):
"""
Warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True.
"""
self.assertEqual(self.func(None), [base.W006])
@override_settings(
MIDDLEWARE=[],
SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_no_content_type_nosniff_no_middleware(self):
"""
Don't warn if SECURE_CONTENT_TYPE_NOSNIFF isn't True and
SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_with_content_type_nosniff(self):
self.assertEqual(self.func(None), [])
class CheckXssFilterTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_xss_filter
return check_xss_filter
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter(self):
"""
Warn if SECURE_BROWSER_XSS_FILTER isn't True.
"""
self.assertEqual(self.func(None), [base.W007])
@override_settings(
MIDDLEWARE=[],
SECURE_BROWSER_XSS_FILTER=False)
def test_no_xss_filter_no_middleware(self):
"""
Don't warn if SECURE_BROWSER_XSS_FILTER isn't True and
SecurityMiddleware isn't in MIDDLEWARE.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_BROWSER_XSS_FILTER=True)
def test_with_xss_filter(self):
self.assertEqual(self.func(None), [])
class CheckSSLRedirectTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_ssl_redirect
return check_ssl_redirect
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect(self):
"""
Warn if SECURE_SSL_REDIRECT isn't True.
"""
self.assertEqual(self.func(None), [base.W008])
@override_settings(
MIDDLEWARE=[],
SECURE_SSL_REDIRECT=False)
def test_no_ssl_redirect_no_middleware(self):
"""
Don't warn if SECURE_SSL_REDIRECT is False and SecurityMiddleware isn't
installed.
"""
self.assertEqual(self.func(None), [])
@override_settings(
MIDDLEWARE=["django.middleware.security.SecurityMiddleware"],
SECURE_SSL_REDIRECT=True)
def test_with_ssl_redirect(self):
self.assertEqual(self.func(None), [])
class CheckSecretKeyTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_secret_key
return check_secret_key
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'ab')
def test_okay_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertGreater(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(self.func(None), [])
@override_settings(SECRET_KEY='')
def test_empty_secret_key(self):
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_missing_secret_key(self):
del settings.SECRET_KEY
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=None)
def test_none_secret_key(self):
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY=('abcdefghijklmnopqrstuvwx' * 2) + 'a')
def test_low_length_secret_key(self):
self.assertEqual(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH - 1)
self.assertEqual(self.func(None), [base.W009])
@override_settings(SECRET_KEY='abcd' * 20)
def test_low_entropy_secret_key(self):
self.assertGreater(len(settings.SECRET_KEY), base.SECRET_KEY_MIN_LENGTH)
self.assertLess(len(set(settings.SECRET_KEY)), base.SECRET_KEY_MIN_UNIQUE_CHARACTERS)
self.assertEqual(self.func(None), [base.W009])
class CheckDebugTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_debug
return check_debug
@override_settings(DEBUG=True)
def test_debug_true(self):
"""
Warn if DEBUG is True.
"""
self.assertEqual(self.func(None), [base.W018])
@override_settings(DEBUG=False)
def test_debug_false(self):
self.assertEqual(self.func(None), [])
class CheckAllowedHostsTest(SimpleTestCase):
@property
def func(self):
from django.core.checks.security.base import check_allowed_hosts
return check_allowed_hosts
@override_settings(ALLOWED_HOSTS=[])
def test_allowed_hosts_empty(self):
self.assertEqual(self.func(None), [base.W020])
@override_settings(ALLOWED_HOSTS=['.example.com', ])
def test_allowed_hosts_set(self):
self.assertEqual(self.func(None), [])
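
# A minimal deployment-settings sketch (illustrative only; the values below are
# assumptions, not part of Django's test suite) that would silence the warnings
# exercised by the checks above. These warnings are normally surfaced with
# ``python manage.py check --deploy``.
#
#   SECURE_HSTS_SECONDS = 3600
#   SECURE_HSTS_INCLUDE_SUBDOMAINS = True
#   SECURE_HSTS_PRELOAD = True
#   SECURE_CONTENT_TYPE_NOSNIFF = True
#   SECURE_BROWSER_XSS_FILTER = True
#   SECURE_SSL_REDIRECT = True
#   SESSION_COOKIE_SECURE = True
#   SESSION_COOKIE_HTTPONLY = True
#   CSRF_COOKIE_SECURE = True
#   CSRF_COOKIE_HTTPONLY = True
#   X_FRAME_OPTIONS = 'DENY'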
| bsd-3-clause | -1,502,807,203,643,040,000 | 33.43662 | 96 | 0.661145 | false |
kittiu/sale-workflow | sale_payment_term_interest/model/account_payment_term.py | 31 | 4714 | # -*- coding: utf-8 -*-
#
#
# Authors: Guewen Baconnier
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from __future__ import division
from datetime import datetime
from dateutil.relativedelta import relativedelta
import openerp.addons.decimal_precision as dp
from openerp import models, fields, api
from openerp.tools.float_utils import float_round as round, float_compare
class AccountPaymentTerm(models.Model):
_inherit = 'account.payment.term'
interest_min = fields.Float(
string='Minimum Interest Amount',
digits=dp.get_precision('Account'),
help="The minimum amount of interest added to a sales "
"order.")
@api.multi
def compute_total_interest(self, value):
self.ensure_one()
values = self.compute_interest(value)
interest = sum(interest for __, __, interest in values)
precision_model = self.env['decimal.precision']
precision = precision_model.precision_get('Account')
compare = float_compare(interest,
self.interest_min,
precision_digits=precision)
if compare == -1: # interest < interest_min
return self.interest_min
else:
return interest
@api.multi
def compute_interest(self, value, date_ref=False):
if date_ref:
date_ref = fields.Date.from_string(date_ref)
else:
date_ref = datetime.today().date()
amount = value
result = []
lines_total = 0.0
precision_model = self.env['decimal.precision']
        # The computation of the amount for each term is exactly the same
        # as the one in 'account_payment_term.compute()'; this is
        # required to ensure that the interest fees are based on the
# same amounts. This is why the 'account' precision is used:
# this is the one used in 'account_payment_term.compute()'.
prec = precision_model.precision_get('Account')
for line in self.line_ids:
if line.value == 'fixed':
line_amount = round(line.value_amount, precision_digits=prec)
elif line.value == 'procent':
line_amount = round(value * line.value_amount,
precision_digits=prec)
elif line.value == 'balance':
line_amount = round(amount, prec)
if not line_amount:
continue
next_date = date_ref + relativedelta(days=line.days)
if line.days2 < 0:
# Getting 1st of next month
next_first_date = next_date + relativedelta(day=1,
months=1)
next_date = (next_first_date +
relativedelta(days=line.days2))
if line.days2 > 0:
next_date += relativedelta(day=line.days2, months=1)
interest = 0.0
if line.interest_rate:
days = (next_date - date_ref).days
rate = line.interest_rate / 100 / (12 * 30) # %/(months*days)
interest = line_amount * rate * days
result.append((fields.Date.to_string(next_date),
line_amount,
interest))
amount -= line_amount
lines_total += line_amount
dist = round(value - lines_total, precision_digits=prec)
if dist:
result.append((fields.Date.today(), dist, 0.0))
return result
class AccountPaymentTermLine(models.Model):
_inherit = 'account.payment.term.line'
interest_rate = fields.Float(
string='Interest Rate',
digits=dp.get_precision('Payment Term'),
help="The annual interest rate applied on a sales order. "
"Value between 0 and 100.\n"
"The interest is computed as: "
"'Amount * (Interest Rate / 100 / "
" (12 months * 30 days)) * Term Days'")
| agpl-3.0 | -2,768,558,756,939,886,000 | 38.613445 | 78 | 0.587187 | false |
jmehnle/ansible | lib/ansible/modules/cloud/ovirt/ovirt_networks.py | 15 | 9604 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_networks
short_description: Module to manage logical networks in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage logical networks in oVirt/RHV"
options:
name:
description:
- "Name of the network to manage."
required: true
state:
description:
- "Should the network be present or absent"
choices: ['present', 'absent']
default: present
data_center:
description:
- "Datacenter name where network reside."
description:
description:
- "Description of the network."
comment:
description:
- "Comment of the network."
vlan_tag:
description:
- "Specify VLAN tag."
vm_network:
description:
            - "If I(True), the network will be marked as a VM network."
            - "A VM network carries traffic relevant to the virtual machine."
mtu:
description:
- "Maximum transmission unit (MTU) of the network."
clusters:
description:
- "List of dictionaries describing how the network is managed in specific cluster."
- "C(name) - Cluster name."
- "C(assigned) - I(true) if the network should be assigned to cluster. Default is I(true)."
- "C(required) - I(true) if the network must remain operational for all hosts associated with this network."
- "C(display) - I(true) if the network should marked as display network."
- "C(migration) - I(true) if the network should marked as migration network."
- "C(gluster) - I(true) if the network should marked as gluster network."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create network
- ovirt_networks:
data_center: mydatacenter
name: mynetwork
vlan_tag: 1
vm_network: true
# Remove network
- ovirt_networks:
state: absent
name: mynetwork
'''
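
# A hypothetical task using the ``clusters`` option documented above (the
# cluster name and flag values are made up for illustration):
#
# - ovirt_networks:
#     data_center: mydatacenter
#     name: mynetwork
#     vlan_tag: 1
#     vm_network: true
#     clusters:
#       - name: mycluster
#         assigned: true
#         required: false
#         display: false
#         migration: true
#         gluster: false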
RETURN = '''
id:
description: "ID of the managed network"
returned: "On success if network is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
network:
description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/network."
returned: "On success if network is found."
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class NetworksModule(BaseModule):
def build_entity(self):
return otypes.Network(
name=self._module.params['name'],
comment=self._module.params['comment'],
description=self._module.params['description'],
data_center=otypes.DataCenter(
name=self._module.params['data_center'],
) if self._module.params['data_center'] else None,
vlan=otypes.Vlan(
self._module.params['vlan_tag'],
) if self._module.params['vlan_tag'] else None,
usages=[
otypes.NetworkUsage.VM if self._module.params['vm_network'] else None
] if self._module.params['vm_network'] is not None else None,
mtu=self._module.params['mtu'],
)
def update_check(self, entity):
return (
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('vlan_tag'), getattr(entity.vlan, 'id', None)) and
equal(self._module.params.get('vm_network'), True if entity.usages else False) and
equal(self._module.params.get('mtu'), entity.mtu)
)
class ClusterNetworksModule(BaseModule):
def __init__(self, network_id, cluster_network, *args, **kwargs):
super(ClusterNetworksModule, self).__init__(*args, **kwargs)
self._network_id = network_id
self._cluster_network = cluster_network
def build_entity(self):
return otypes.Network(
id=self._network_id,
name=self._module.params['name'],
required=self._cluster_network.get('required'),
display=self._cluster_network.get('display'),
usages=[
otypes.NetworkUsage(usage)
for usage in ['display', 'gluster', 'migration']
if self._cluster_network.get(usage, False)
] if (
self._cluster_network.get('display') is not None or
self._cluster_network.get('gluster') is not None or
self._cluster_network.get('migration') is not None
) else None,
)
def update_check(self, entity):
return (
equal(self._cluster_network.get('required'), entity.required) and
equal(self._cluster_network.get('display'), entity.display) and
equal(
sorted([
usage
for usage in ['display', 'gluster', 'migration']
if self._cluster_network.get(usage, False)
]),
sorted([
str(usage)
for usage in getattr(entity, 'usages', [])
# VM + MANAGEMENT is part of root network
if usage != otypes.NetworkUsage.VM and usage != otypes.NetworkUsage.MANAGEMENT
]),
)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
data_center=dict(default=None, required=True),
name=dict(default=None, required=True),
description=dict(default=None),
comment=dict(default=None),
vlan_tag=dict(default=None, type='int'),
vm_network=dict(default=None, type='bool'),
mtu=dict(default=None, type='int'),
clusters=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
clusters_service = connection.system_service().clusters_service()
networks_service = connection.system_service().networks_service()
networks_module = NetworksModule(
connection=connection,
module=module,
service=networks_service,
)
state = module.params['state']
network = networks_module.search_entity(
search_params={
'name': module.params['name'],
'datacenter': module.params['data_center'],
},
)
if state == 'present':
ret = networks_module.create(entity=network)
# Update clusters networks:
if module.params.get('clusters') is not None:
for param_cluster in module.params.get('clusters'):
cluster = search_by_name(clusters_service, param_cluster.get('name'))
if cluster is None:
raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
cluster_networks_service = clusters_service.service(cluster.id).networks_service()
cluster_networks_module = ClusterNetworksModule(
network_id=ret['id'],
cluster_network=param_cluster,
connection=connection,
module=module,
service=cluster_networks_service,
)
if param_cluster.get('assigned', True):
ret = cluster_networks_module.create()
else:
ret = cluster_networks_module.remove()
elif state == 'absent':
ret = networks_module.remove(entity=network)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 | -3,979,436,862,328,987,000 | 34.439114 | 120 | 0.590275 | false |
rjpower/spark | python/pyspark/files.py | 5 | 1885 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
class SparkFiles(object):
"""
Resolves paths to files added through
L{SparkContext.addFile()<pyspark.context.SparkContext.addFile>}.
SparkFiles contains only classmethods; users should not create SparkFiles
instances.
"""
_root_directory = None
_is_running_on_worker = False
_sc = None
def __init__(self):
raise NotImplementedError("Do not construct SparkFiles objects")
@classmethod
def get(cls, filename):
"""
Get the absolute path of a file added through C{SparkContext.addFile()}.
"""
path = os.path.join(SparkFiles.getRootDirectory(), filename)
return os.path.abspath(path)
@classmethod
def getRootDirectory(cls):
"""
Get the root directory that contains files added through
C{SparkContext.addFile()}.
"""
if cls._is_running_on_worker:
return cls._root_directory
else:
# This will have to change if we support multiple SparkContexts:
return cls._sc._jvm.spark.SparkFiles.getRootDirectory()
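# Minimal usage sketch (illustrative, not part of the original file). Assumes a
# running SparkContext `sc` and a local file "data.txt":
#
#   from pyspark import SparkContext, SparkFiles
#   sc = SparkContext(appName="example")
#   sc.addFile("data.txt")                    # ship the file to executors
#   path = SparkFiles.get("data.txt")         # absolute local path to the copy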
| apache-2.0 | 5,243,843,828,526,700,000 | 33.272727 | 80 | 0.693369 | false |
srsman/odoo | addons/account_payment/account_move_line.py | 241 | 4455 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from operator import itemgetter
class account_move_line(osv.osv):
_inherit = "account.move.line"
# delegate to parent, used for local fields.function redefinition
def _amount_to_pay(self, cr, uid, ids, field_names, args, context=None):
return {
id: value['amount_residual']
for id, value in self._amount_residual(cr, uid, ids, field_names, args,
context=context).items()
}
def _to_pay_search(self, cr, uid, obj, name, args, context=None):
if not args:
return []
line_obj = self.pool.get('account.move.line')
query = line_obj._query_get(cr, uid, context={})
where = ' and '.join(map(lambda x: '''(SELECT
CASE WHEN l.amount_currency < 0
THEN - l.amount_currency
ELSE l.credit
END - coalesce(sum(pl.amount_currency), 0)
FROM payment_line pl
INNER JOIN payment_order po ON (pl.order_id = po.id)
WHERE move_line_id = l.id
AND po.state != 'cancel'
) %(operator)s %%s ''' % {'operator': x[1]}, args))
sql_args = tuple(map(itemgetter(2), args))
cr.execute(('''SELECT id
FROM account_move_line l
WHERE account_id IN (select id
FROM account_account
WHERE type=%s AND active)
AND reconcile_id IS null
AND credit > 0
AND ''' + where + ' and ' + query), ('payable',)+sql_args )
res = cr.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', map(lambda x:x[0], res))]
def line2bank(self, cr, uid, ids, payment_type=None, context=None):
"""
        Try to return, for each ledger posting line, a corresponding bank
        account according to the payment type. This works using one of the
        banks of the partner defined on the invoice possibly associated
        with the line.
Return the first suitable bank for the corresponding partner.
"""
payment_mode_obj = self.pool.get('payment.mode')
line2bank = {}
if not ids:
return {}
bank_type = payment_mode_obj.suitable_bank_types(cr, uid, payment_type,
context=context)
for line in self.browse(cr, uid, ids, context=context):
line2bank[line.id] = False
if line.invoice and line.invoice.partner_bank_id:
line2bank[line.id] = line.invoice.partner_bank_id.id
elif line.partner_id:
if not line.partner_id.bank_ids:
line2bank[line.id] = False
else:
for bank in line.partner_id.bank_ids:
if bank.state in bank_type:
line2bank[line.id] = bank.id
break
if not line2bank.get(line.id) and line.partner_id.bank_ids:
line2bank[line.id] = line.partner_id.bank_ids[0].id
else:
raise osv.except_osv(_('Error!'), _('There is no partner defined on the entry line.'))
return line2bank
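    # Illustrative call (assumption, not from the original source): for a list
    # of move line ids and a payment type, line2bank returns a mapping
    # {line_id: bank_id or False}. 'manual' below is a placeholder payment type:
    #   self.pool.get('account.move.line').line2bank(cr, uid, [7, 8], 'manual')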
_columns = {
'amount_to_pay': fields.function(_amount_to_pay,
type='float', string='Amount to pay', fnct_search=_to_pay_search),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,742,568,957,336,944,600 | 41.028302 | 102 | 0.562963 | false |
Andrei-Stepanov/avocado-vt | virttest/libvirt_xml/devices/interface.py | 12 | 9271 | """
interface device support class(es)
http://libvirt.org/formatdomain.html#elementsNICS
http://libvirt.org/formatnwfilter.html#nwfconceptsvars
"""
from virttest.libvirt_xml import accessors, xcepts
from virttest.libvirt_xml.devices import base, librarian
class Interface(base.TypedDeviceBase):
__slots__ = ('source', 'mac_address', 'bandwidth',
'model', 'link_state', 'target',
'driver', 'address', 'boot_order',
'filterref', 'backend', 'virtualport_type')
def __init__(self, type_name, virsh_instance=base.base.virsh):
super(Interface, self).__init__(device_tag='interface',
type_name=type_name,
virsh_instance=virsh_instance)
accessors.XMLElementDict(property_name="source",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='source')
accessors.XMLElementDict(property_name="target",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='target')
accessors.XMLElementDict(property_name="backend",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='backend')
accessors.XMLAttribute(property_name="mac_address",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='mac',
attribute='address')
accessors.XMLAttribute(property_name="link_state",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='link',
attribute='state')
accessors.XMLAttribute(property_name="boot_order",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='boot',
attribute='order')
accessors.XMLElementNest("bandwidth", self,
parent_xpath='/',
tag_name='bandwidth',
subclass=self.Bandwidth,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementNest("driver", self,
parent_xpath='/',
tag_name='driver',
subclass=self.Driver,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementNest("filterref", self,
parent_xpath='/',
tag_name='filterref',
subclass=self.Filterref,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLAttribute(property_name="model",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='model',
attribute='type')
accessors.XMLElementNest('address', self, parent_xpath='/',
tag_name='address', subclass=self.Address,
subclass_dargs={'type_name': 'pci',
'virsh_instance': virsh_instance})
accessors.XMLAttribute('virtualport_type', self, parent_xpath='/',
tag_name='virtualport', attribute='type')
# For convenience
Address = librarian.get('address')
def new_bandwidth(self, **dargs):
"""
        Return a new interface bandwidth instance from dargs
"""
new_one = self.Bandwidth(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def new_driver(self, **dargs):
"""
        Return a new interface driver instance from dargs
"""
new_one = self.Driver(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def new_iface_address(self, **dargs):
"""
Return a new interface Address instance and set properties from dargs
"""
new_one = self.Address("pci", virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
def new_filterref(self, **dargs):
"""
        Return a new interface filterref instance from dargs
"""
new_one = self.Filterref(virsh_instance=self.virsh)
for key, value in dargs.items():
setattr(new_one, key, value)
return new_one
class Bandwidth(base.base.LibvirtXMLBase):
"""
Interface bandwidth xml class.
Properties:
inbound:
dict. Keys: average, peak, floor, burst
outbound:
dict. Keys: average, peak, floor, burst
"""
__slots__ = ("inbound", "outbound")
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLElementDict("inbound", self, parent_xpath="/",
tag_name="inbound")
accessors.XMLElementDict("outbound", self, parent_xpath="/",
tag_name="outbound")
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<bandwidth/>'
class Driver(base.base.LibvirtXMLBase):
"""
Interface Driver xml class.
Properties:
driver:
dict.
host:
dict. Keys: csum, gso, tso4, tso6, ecn, ufo
guest:
dict. Keys: csum, gso, tso4, tso6, ecn, ufo
"""
__slots__ = ("driver_attr", "driver_host", "driver_guest")
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLElementDict("driver_attr", self, parent_xpath="/",
tag_name="driver")
accessors.XMLElementDict("driver_host", self, parent_xpath="/",
tag_name="host")
accessors.XMLElementDict("driver_guest", self, parent_xpath="/",
tag_name="guest")
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<driver/>'
class Filterref(base.base.LibvirtXMLBase):
"""
Interface filterref xml class.
Properties:
name:
string. filter name
parameters:
list. parameters element dict list
"""
__slots__ = ("name", "parameters")
def __init__(self, virsh_instance=base.base.virsh):
accessors.XMLAttribute(property_name="name",
libvirtxml=self,
forbidden=None,
parent_xpath='/',
tag_name='filterref',
attribute='filter')
accessors.XMLElementList(property_name='parameters',
libvirtxml=self,
parent_xpath='/',
marshal_from=self.marshal_from_parameter,
marshal_to=self.marshal_to_parameter)
super(self.__class__, self).__init__(virsh_instance=virsh_instance)
self.xml = '<filterref/>'
@staticmethod
def marshal_from_parameter(item, index, libvirtxml):
"""Convert a dictionary into a tag + attributes"""
del index # not used
del libvirtxml # not used
if not isinstance(item, dict):
raise xcepts.LibvirtXMLError("Expected a dictionary of parameter "
"attributes, not a %s"
% str(item))
# return copy of dict, not reference
return ('parameter', dict(item))
@staticmethod
def marshal_to_parameter(tag, attr_dict, index, libvirtxml):
"""Convert a tag + attributes into a dictionary"""
del index # not used
del libvirtxml # not used
if tag != 'parameter':
return None # skip this one
return dict(attr_dict) # return copy of dict, not reference
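# Illustrative sketch (not part of the original module) showing how these
# helpers are typically combined; all attribute values are placeholders:
#
#   iface = Interface('network')
#   iface.source = {'network': 'default'}
#   iface.mac_address = '52:54:00:12:34:56'
#   iface.bandwidth = iface.new_bandwidth(inbound={'average': '1000'},
#                                         outbound={'average': '1000'})
#   print(iface.xml)   # serialized <interface> element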
| gpl-2.0 | 2,023,565,438,006,017,800 | 40.950226 | 83 | 0.461115 | false |
Natim/sentry | src/sentry/migrations/0051_auto__del_pendingprojectmember__del_unique_pendingprojectmember_projec.py | 36 | 21164 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'PendingProjectMember', fields ['project', 'email']
db.delete_unique('sentry_pendingprojectmember', ['project_id', 'email'])
# Deleting model 'PendingProjectMember'
db.delete_table('sentry_pendingprojectmember')
# Adding model 'PendingTeamMember'
db.create_table('sentry_pendingteammember', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('team', self.gf('sentry.db.models.fields.FlexibleForeignKey')(related_name='pending_member_set', to=orm['sentry.Team'])),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
('type', self.gf('django.db.models.fields.IntegerField')(default=0)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('sentry', ['PendingTeamMember'])
# Adding unique constraint on 'PendingTeamMember', fields ['team', 'email']
db.create_unique('sentry_pendingteammember', ['team_id', 'email'])
def backwards(self, orm):
# Removing unique constraint on 'PendingTeamMember', fields ['team', 'email']
db.delete_unique('sentry_pendingteammember', ['team_id', 'email'])
# Adding model 'PendingProjectMember'
db.create_table('sentry_pendingprojectmember', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(related_name='pending_member_set', to=orm['sentry.Project'])),
('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('type', self.gf('django.db.models.fields.IntegerField')(default=0)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
))
db.send_create_signal('sentry', ['PendingProjectMember'])
# Adding unique constraint on 'PendingProjectMember', fields ['project', 'email']
db.create_unique('sentry_pendingprojectmember', ['project_id', 'email'])
# Deleting model 'PendingTeamMember'
db.delete_table('sentry_pendingteammember')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 3, 29, 45, 137609)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 5, 3, 29, 45, 137481)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectdomain': {
'Meta': {'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'domain_set'", 'to': "orm['sentry.Project']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_project_set'", 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause | -8,439,509,003,606,770,000 | 78.265918 | 167 | 0.565158 | false |
haarcuba/testix | test/test_argumentexpectations.py | 1 | 3090 | import hypothesis
import hypothesis.strategies as strategies
import pytest
from testix import fake
from testix import scenario
from testix import testixexception
from testix import argumentexpectations
class TestArgumentExpectations:
@hypothesis.given(A=strategies.integers(),B=strategies.integers())
def test_argument_equals_raises_when_called_with_wrong_arguments(self, A, B):
hypothesis.assume( A != B )
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( A ) >> 'first'
s.some_object( B ) >> 'second'
assert fakeObject( A ) == 'first'
with pytest.raises( testixexception.ExpectationException ):
fakeObject( A )
def test_argument_is_fake_object_with_path( self ):
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( argumentexpectations.ArgumentIsFakeObjectWithPath( 'another_fake_object' ) ) >> 'the result'
s.some_object( argumentexpectations.ArgumentIsFakeObjectWithPath( 'yet_another' ) ) >> 'another result'
assert fakeObject(fake.Fake('another_fake_object')) == 'the result'
assert fakeObject(fake.Fake('yet_another')) == 'another result'
def test_FakeObjectExpectation( self ):
fakeObject = fake.Fake('some_object')
fakeArgument = fake.Fake('fake_argument')
with scenario.Scenario() as s:
s.some_object(fake.Fake('fake_argument'))
fakeObject( fakeArgument )
def test_IgnoreArgument( self ):
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( 10 ) >> 'first'
s.some_object( argumentexpectations.IgnoreArgument() ) >> 'second'
assert fakeObject( 10 ) == 'first'
assert fakeObject( "this doens't matter" ) == 'second'
def test_IgnoreCallDetails(self):
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( 10 ) >> 'first'
s.some_object( argumentexpectations.IgnoreCallDetails() ) >> 'second'
s.another_object(argumentexpectations.IgnoreCallDetails())
assert fakeObject( 10 ) == 'first'
assert fakeObject( "this doens't matter", "this doens'nt either", this='does not matter also', that='neither' ) == 'second'
with pytest.raises( testixexception.ExpectationException ):
fakeObject("this is an unexpected call: verify that IgnoreCallDetails() still leaves the Fake object's path verification intact")
def test_KeywordArguments( self ):
fakeObject = fake.Fake('some_object')
with scenario.Scenario() as s:
s.some_object( 10, name = 'Lancelot' ).returns( 'first' )
s.some_object( 11, name = 'Galahad' ).returns( 'second' )
assert fakeObject( 10, name = 'Lancelot' ) == 'first'
with pytest.raises( testixexception.ExpectationException ):
fakeObject( 11, name = 'not Galahad' )
| mit | 1,450,891,443,894,828,800 | 48.83871 | 145 | 0.637217 | false |
kirti3192/spoken-website | cms/admin.py | 2 | 2857 | from django.template.defaultfilters import slugify
from django.contrib import admin
from cms.models import *
from django.conf import settings
from PIL import Image
import glob, os
from cms.forms import *
class SubNavInline(admin.TabularInline):
model = SubNav
extra = 0
class NavAdmin(admin.ModelAdmin):
list_display = ('nav_title', 'permalink', 'position', 'target_new', 'visible', 'created')
inlines = [SubNavInline]
class BlockAdmin(admin.ModelAdmin):
form = AdminBodyForm
list_display = ('title', 'block_location', 'position', 'visible', 'created')
class PageAdmin(admin.ModelAdmin):
form = CmsPageForm
list_display = ('title', 'permalink', 'target_new', 'visible', 'created')
class EventAdmin(admin.ModelAdmin):
form = AdminBodyForm
exclude = ('user',)
list_display = ('user', 'title', 'body', 'event_date', 'source_link', 'created')
def save_model(self, request, obj, form, change):
obj.user = request.user
obj.save()
class NotificationAdmin(admin.ModelAdmin):
exclude = ('user',)
list_display = ('user', 'body', 'start_date', 'expiry_date', 'updated')
def save_model(self, request, obj, form, change):
obj.user = request.user
obj.save()
class NewsTypeAdmin(admin.ModelAdmin):
exclude = ('slug',)
list_display = ('name',)
def save_model(self, request, obj, form, change):
obj.slug = slugify(request.POST['name'])
obj.save()
class NewsAdmin(admin.ModelAdmin):
#form = AdminBodyForm
form = NewsAdditionaFieldAdmin
exclude = ('created_by', 'slug')
list_display = ('title', 'weight','state','picture', 'body', 'url', 'url_title', 'created_by', 'created')
list_filter = ('news_type','state')
def save_model(self, request, obj, form, change):
obj.created_by = request.user
obj.picture = None
obj.slug = slugify(request.POST['title'])
obj.save()
if 'picture' in request.FILES and request.FILES['picture']:
obj.picture = request.FILES['picture']
obj.save()
size = 128, 128
filename = str(obj.picture)
file, ext = os.path.splitext(filename)
if ext != '.pdf' and ext != '':
im = Image.open(obj.picture)
im.thumbnail(size, Image.ANTIALIAS)
ext = ext[1:]
mimeType = ext.upper()
if mimeType == 'JPG':
mimeType = 'JPEG'
im.save(settings.MEDIA_ROOT + "news/" + str(obj.id) + "/" + str(obj.id) + "-thumb." + ext, mimeType)
admin.site.register(Block, BlockAdmin)
admin.site.register(Nav, NavAdmin)
admin.site.register(Page, PageAdmin)
admin.site.register(Event, EventAdmin)
admin.site.register(Notification, NotificationAdmin)
admin.site.register(NewsType, NewsTypeAdmin)
admin.site.register(News, NewsAdmin)
| gpl-3.0 | -2,728,689,829,492,137,500 | 34.271605 | 112 | 0.633182 | false |
izelnakri/moses | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/buildbot/buildbot_run.py | 270 | 8338 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import filecmp
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
"""Wrapper around subprocess.call which treats errors as build exceptions."""
with open(os.devnull) as devnull_fd:
retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
if retcode != 0:
print '@@@STEP_EXCEPTION@@@'
sys.exit(1)
def PrepareCmake():
"""Build CMake 2.8.8 since the version in Precise is 2.8.7."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber CMake checkout@@@'
shutil.rmtree(CMAKE_DIR)
# We always build CMake 2.8.8, so no need to do anything
# if the directory already exists.
if os.path.isdir(CMAKE_DIR):
return
print '@@@BUILD_STEP Initialize CMake checkout@@@'
os.mkdir(CMAKE_DIR)
print '@@@BUILD_STEP Sync CMake@@@'
CallSubProcess(
['git', 'clone',
'--depth', '1',
'--single-branch',
'--branch', 'v2.8.8',
'--',
'git://cmake.org/cmake.git',
CMAKE_DIR],
cwd=CMAKE_DIR)
print '@@@BUILD_STEP Build CMake@@@'
CallSubProcess(
['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
cwd=CMAKE_DIR)
CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR)
_ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng'
def PrepareAndroidTree():
"""Prepare an Android tree to run 'android' format tests."""
if os.environ['BUILDBOT_CLOBBER'] == '1':
print '@@@BUILD_STEP Clobber Android checkout@@@'
shutil.rmtree(ANDROID_DIR)
# (Re)create the directory so that the following steps will succeed.
if not os.path.isdir(ANDROID_DIR):
os.mkdir(ANDROID_DIR)
# We use a manifest from the gyp project listing pinned revisions of AOSP to
# use, to ensure that we test against a stable target. This needs to be
# updated to pick up new build system changes sometimes, so we must test if
# it has changed.
manifest_filename = 'aosp_manifest.xml'
gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename)
android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests',
manifest_filename)
manifest_is_current = (os.path.isfile(android_manifest) and
filecmp.cmp(gyp_manifest, android_manifest))
if not manifest_is_current:
# It's safe to repeat these steps, so just do them again to make sure we are
# in a good state.
print '@@@BUILD_STEP Initialize Android checkout@@@'
CallSubProcess(
['repo', 'init',
'-u', 'https://android.googlesource.com/platform/manifest',
'-b', 'master',
'-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
cwd=ANDROID_DIR)
shutil.copy(gyp_manifest, android_manifest)
print '@@@BUILD_STEP Sync Android@@@'
CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename],
cwd=ANDROID_DIR)
# If we already built the system image successfully and didn't sync to a new
# version of the source, skip running the build again as it's expensive even
# when there's nothing to do.
system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic',
'system.img')
if manifest_is_current and os.path.isfile(system_img):
return
print '@@@BUILD_STEP Build Android@@@'
CallSubProcess(
['/bin/bash',
'-c', '%s && make -j4' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StartAndroidEmulator():
"""Start an android emulator from the built android tree."""
print '@@@BUILD_STEP Start Android emulator@@@'
CallSubProcess(['/bin/bash', '-c',
'%s && adb kill-server ' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
# If taskset is available, use it to force adbd to run only on one core, as,
# sadly, it improves its reliability (see crbug.com/268450).
adbd_wrapper = ''
with open(os.devnull, 'w') as devnull_fd:
if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0:
adbd_wrapper = 'taskset -c 0'
CallSubProcess(['/bin/bash', '-c',
'%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)],
cwd=ANDROID_DIR)
subprocess.Popen(
['/bin/bash', '-c',
'%s && emulator -no-window' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
CallSubProcess(
['/bin/bash', '-c',
'%s && adb wait-for-device' % _ANDROID_SETUP],
cwd=ANDROID_DIR)
def StopAndroidEmulator():
"""Stop all android emulators."""
print '@@@BUILD_STEP Stop Android emulator@@@'
# If this fails, it's because there is no emulator running.
subprocess.call(['pkill', 'emulator.*'])
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
    0 for success, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'gyp/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'gyp'] + tests)
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
# Dump out/ directory.
print '@@@BUILD_STEP cleanup@@@'
print 'Removing %s...' % OUT_DIR
shutil.rmtree(OUT_DIR, ignore_errors=True)
print 'Done.'
retcode = 0
# The Android gyp bot runs on linux so this must be tested first.
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
PrepareAndroidTree()
StartAndroidEmulator()
try:
retcode += GypTestFormat('android')
finally:
StopAndroidEmulator()
elif sys.platform.startswith('linux'):
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('make')
PrepareCmake()
retcode += GypTestFormat('cmake')
elif sys.platform == 'darwin':
retcode += GypTestFormat('ninja')
retcode += GypTestFormat('xcode')
retcode += GypTestFormat('make')
elif sys.platform == 'win32':
retcode += GypTestFormat('ninja')
if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
msvs_version='2013',
tests=[
r'test\generator-output\gyptest-actions.py',
r'test\generator-output\gyptest-relocate.py',
r'test\generator-output\gyptest-rules.py'])
retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
else:
raise Exception('Unknown platform')
if retcode:
# TODO(bradnelson): once the annotator supports a postscript (section for
# after the build proper that could be used for cumulative failures),
# use that instead of this. This isolates the final return value so
# that it isn't misattributed to the last stage.
print '@@@BUILD_STEP failures@@@'
sys.exit(retcode)
if __name__ == '__main__':
GypBuild()
| mit | -5,988,519,446,608,699,000 | 32.219124 | 117 | 0.630727 | false |
somic/paasta | tests/cli/test_cmds_get_latest_deployment.py | 1 | 1863 | # Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import MagicMock
from mock import patch
from paasta_tools.cli.cmds import get_latest_deployment
def test_get_latest_deployment(capfd):
mock_args = MagicMock(
service='',
deploy_group='',
soa_dir='',
)
with patch(
'paasta_tools.cli.cmds.get_latest_deployment.get_currently_deployed_sha',
return_value="FAKE_SHA", autospec=True,
), patch(
'paasta_tools.cli.cmds.get_latest_deployment.validate_service_name', autospec=True,
):
assert get_latest_deployment.paasta_get_latest_deployment(mock_args) == 0
assert "FAKE_SHA" in capfd.readouterr()[0]
def test_get_latest_deployment_no_deployment_tag(capfd):
mock_args = MagicMock(
service='fake_service',
deploy_group='fake_deploy_group',
soa_dir='',
)
with patch(
'paasta_tools.cli.cmds.get_latest_deployment.get_currently_deployed_sha',
return_value=None, autospec=True,
), patch(
'paasta_tools.cli.cmds.get_latest_deployment.validate_service_name', autospec=True,
):
assert get_latest_deployment.paasta_get_latest_deployment(mock_args) == 1
assert "A deployment could not be found for fake_deploy_group in fake_service" in \
capfd.readouterr()[1]
| apache-2.0 | -266,696,243,005,262,700 | 36.26 | 91 | 0.689748 | false |
sunyihuan326/DeltaLab | shuwei_fengge/practice_one/model/tt.py | 1 | 3958 | # coding:utf-8
'''
Created on 2017/12/8.
@author: chk01
'''
import scipy.io as scio
# data = scio.loadmat(file)
# from sklearn.model_selection import train_test_split
#
# print(data['X'].shape)
# print(data['Y'].shape)
# X_train, X_test, Y_train, Y_test = train_test_split(data['X'], data['Y'], test_size=0.2)
# print(X_train.shape)
# print(Y_train.shape)
# print(X_test.shape)
# print(Y_test.shape)
import numpy as np
import scipy.io as scio
import tensorflow as tf
from practice_one.model.utils import *
from tensorflow.contrib.factorization import KMeans
from sklearn.ensemble import AdaBoostClassifier
# print(np.e)
# print(-np.log(np.e / (np.e + 8)))
# ZL = tf.Variable([[0, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)
# print(ZL.shape)
# Y = tf.constant([[0, 0, 0, 0, 0, 0, 1, 0, 0]], dtype=tf.float32)
# Y = tf.get_variable(dtype=tf.float32, shape=(1, 2), name='tt',initializer=tf.contrib.layers.xavier_initializer())
# cor_op = tf.argmax(Y, 1)
# pre_op = tf.argmax(ZL, 1)
# cost1 = tf.square(tf.cast(cor_op - pre_op, dtype=tf.float32))
# lost = tf.reduce_mean(
# cost1 + tf.nn.softmax_cross_entropy_with_logits(logits=ZL,
# labels=Y))
# # loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))
# train_op = tf.train.GradientDescentOptimizer(0.1).minimize(lost)
# init = tf.global_variables_initializer()
# with tf.Session() as sess:
# sess.run(init)
# for i in range(30):
# sess.run(train_op)
# print(sess.run(lost))
# print(sess.run(tf.reduce_mean(cost1)))
# print(sess.run(tf.argmax(ZL, 1)))
# 1.37195
# 2.37195
# parameters = scio.loadmat('kmeans_parameters.mat')
# X_train, X_test, Y_train, Y_test = load_data("face_1_channel_sense.mat")
# print(X_test.shape)
# num_features = 28
# num_classes = 3
#
# X = tf.placeholder(tf.float32, shape=[None, num_features])
# Y = tf.placeholder(tf.float32, shape=[None, num_classes])
#
# kmeans = KMeans(inputs=X, num_clusters=300,
# distance_metric='cosine',
# use_mini_batch=True)
#
# (all_scores, cluster_idx, scores, cluster_centers_initialized, cluster_centers_var, init_op,
# train_op) = kmeans.training_graph()
# cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple
#
# # Initialize the variables (i.e. assign their default value)
# init_vars = tf.global_variables_initializer()
#
# # Start TensorFlow session
# sess = tf.Session()
# sess.run(init_vars, feed_dict={X: X_test})
# sess.run(init_op, feed_dict={X: X_test})
# cl = sess.run(cluster_idx, feed_dict={X: X_train})
# print("cl",cl)
# print(len(cl))
# parameters = scio.loadmat('kmeans_parameters.mat')
# print("parameters",parameters['labels_map'][0])
# labels_map = tf.convert_to_tensor(parameters['labels_map'][0])
#
# # Evaluation ops
# # Lookup: centroid_id -> label
# cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
#
# # Test Model
# test_x, test_y = X_test, Y_test
# with sess.as_default():
# cluster_label = cluster_label.eval(feed_dict={X: X_test})
#
# c = 0
# for i in range(len(cluster_label)):
# if abs(cluster_label[i] - np.argmax(Y_train, 1)[i]) > 1:
# c += 1. / len(cluster_label)
# print(c)
# tt = scio.loadmat("tt_cluster_label.mat")
# sense = scio.loadmat("sense_cluster.mat")
# tt = tt["tt"][0]
# se = sense["sense"][0]
# for i in range(len(tt)):
# if tt[i] != se[i]:
# print(i, tt[i], se[i])
# # print('correct_prediction', correct_prediction)
# index = [1, 2, 0, 2, 1, 2]
# indice = [[0, 2, 1, 1, 1], [0, 1, 1, 2, 1]]
# a = tf.one_hot(index, 3, axis=0)
# b = tf.one_hot(indice, 3, axis=1)
# with tf.Session() as sess:
# print(sess.run(a))
# print("b", sess.run(b))
file = "face_1_channel_sense"
X_train, X_test, Y_train, Y_test = load_data(file)
clf = AdaBoostClassifier(n_estimators=100)
Y_train = np.argmax(Y_train, 1)
c = clf.fit(X_train, Y_train)
print(c)
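# Hedged continuation sketch (not in the original script): evaluating the fitted
# AdaBoost classifier on the held-out split; assumes Y_test is one-hot encoded
# like Y_train was before the argmax above.
#   from sklearn.metrics import accuracy_score
#   Y_pred = clf.predict(X_test)
#   print(accuracy_score(np.argmax(Y_test, 1), Y_pred))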
| mit | -5,570,555,896,047,293,000 | 30.919355 | 115 | 0.629358 | false |
huanchenz/STX-h-store | third_party/python/boto/ec2/autoscale/policy.py | 24 | 5549 | # Copyright (c) 2009-2010 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2011 Jann Kleen
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
from boto.ec2.elb.listelement import ListElement
class Alarm(object):
def __init__(self, connection=None):
self.connection = connection
self.name = None
self.alarm_arn = None
def __repr__(self):
return 'Alarm:%s' % self.name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'AlarmName':
self.name = value
elif name == 'AlarmARN':
self.alarm_arn = value
else:
setattr(self, name, value)
class AdjustmentType(object):
def __init__(self, connection=None):
self.connection = connection
self.adjustment_types = ListElement([])
def __repr__(self):
return 'AdjustmentType:%s' % self.adjustment_types
def startElement(self, name, attrs, connection):
if name == 'AdjustmentType':
return self.adjustment_types
def endElement(self, name, value, connection):
return
class MetricCollectionTypes(object):
class BaseType(object):
arg = ''
def __init__(self, connection):
self.connection = connection
self.val = None
def __repr__(self):
return '%s:%s' % (self.arg, self.val)
def startElement(self, name, attrs, connection):
return
def endElement(self, name, value, connection):
if name == self.arg:
self.val = value
class Metric(BaseType):
arg = 'Metric'
class Granularity(BaseType):
arg = 'Granularity'
def __init__(self, connection=None):
self.connection = connection
self.metrics = []
self.granularities = []
def __repr__(self):
return 'MetricCollectionTypes:<%s, %s>' % (self.metrics, self.granularities)
def startElement(self, name, attrs, connection):
if name == 'Granularities':
self.granularities = ResultSet([('member', self.Granularity)])
return self.granularities
elif name == 'Metrics':
self.metrics = ResultSet([('member', self.Metric)])
return self.metrics
def endElement(self, name, value, connection):
return
class ScalingPolicy(object):
def __init__(self, connection=None, **kwargs):
"""
Scaling Policy
:type name: str
:param name: Name of scaling policy.
:type adjustment_type: str
:param adjustment_type: Specifies the type of adjustment. Valid values are `ChangeInCapacity`, `ExactCapacity` and `PercentChangeInCapacity`.
:type as_name: str or int
:param as_name: Name or ARN of the Auto Scaling Group.
:type scaling_adjustment: int
:param scaling_adjustment: Value of adjustment (type specified in `adjustment_type`).
:type cooldown: int
:param cooldown: Time (in seconds) before Alarm related Scaling Activities can start after the previous Scaling Activity ends.
"""
self.name = kwargs.get('name', None)
self.adjustment_type = kwargs.get('adjustment_type', None)
self.as_name = kwargs.get('as_name', None)
self.scaling_adjustment = kwargs.get('scaling_adjustment', None)
self.cooldown = kwargs.get('cooldown', None)
self.connection = connection
def __repr__(self):
return 'ScalingPolicy(%s group:%s adjustment:%s)' % (self.name,
self.as_name,
self.adjustment_type)
def startElement(self, name, attrs, connection):
if name == 'Alarms':
self.alarms = ResultSet([('member', Alarm)])
return self.alarms
def endElement(self, name, value, connection):
if name == 'PolicyName':
self.name = value
elif name == 'AutoScalingGroupName':
self.as_name = value
elif name == 'PolicyARN':
self.policy_arn = value
elif name == 'ScalingAdjustment':
self.scaling_adjustment = int(value)
elif name == 'Cooldown':
self.cooldown = int(value)
elif name == 'AdjustmentType':
self.adjustment_type = value
def delete(self):
return self.connection.delete_policy(self.name, self.as_name)
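# Illustrative usage (assumption, not part of the original file): creating a
# policy through an autoscale connection; region, names and numbers are
# placeholders.
#
#   import boto.ec2.autoscale
#   from boto.ec2.autoscale import ScalingPolicy
#   conn = boto.ec2.autoscale.connect_to_region('us-east-1')
#   policy = ScalingPolicy(name='scale-up', adjustment_type='ChangeInCapacity',
#                          as_name='my-group', scaling_adjustment=2, cooldown=300)
#   conn.create_scaling_policy(policy)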
| gpl-3.0 | 3,114,703,872,488,029,000 | 34.8 | 149 | 0.623356 | false |
ebar0n/django | django/contrib/postgres/validators.py | 87 | 2675 | from django.core.exceptions import ValidationError
from django.core.validators import (
MaxLengthValidator, MaxValueValidator, MinLengthValidator,
MinValueValidator,
)
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _, ngettext_lazy
class ArrayMaxLengthValidator(MaxLengthValidator):
message = ngettext_lazy(
'List contains %(show_value)d item, it should contain no more than %(limit_value)d.',
'List contains %(show_value)d items, it should contain no more than %(limit_value)d.',
'limit_value')
class ArrayMinLengthValidator(MinLengthValidator):
message = ngettext_lazy(
'List contains %(show_value)d item, it should contain no fewer than %(limit_value)d.',
'List contains %(show_value)d items, it should contain no fewer than %(limit_value)d.',
'limit_value')
@deconstructible
class KeysValidator:
"""A validator designed for HStore to require/restrict keys."""
messages = {
'missing_keys': _('Some keys were missing: %(keys)s'),
'extra_keys': _('Some unknown keys were provided: %(keys)s'),
}
strict = False
def __init__(self, keys, strict=False, messages=None):
self.keys = set(keys)
self.strict = strict
if messages is not None:
self.messages = {**self.messages, **messages}
def __call__(self, value):
keys = set(value)
missing_keys = self.keys - keys
if missing_keys:
raise ValidationError(
self.messages['missing_keys'],
code='missing_keys',
params={'keys': ', '.join(missing_keys)},
)
if self.strict:
extra_keys = keys - self.keys
if extra_keys:
raise ValidationError(
self.messages['extra_keys'],
code='extra_keys',
params={'keys': ', '.join(extra_keys)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.keys == other.keys and
self.messages == other.messages and
self.strict == other.strict
)
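# Usage sketch (not from the original module; the field and key names are
# hypothetical): KeysValidator is meant to be attached to an HStoreField.
#
#     from django.contrib.postgres.fields import HStoreField
#
#     details = HStoreField(validators=[
#         KeysValidator(keys=['size', 'colour'], strict=True),
#     ])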
class RangeMaxValueValidator(MaxValueValidator):
def compare(self, a, b):
return a.upper is None or a.upper > b
message = _('Ensure that this range is completely less than or equal to %(limit_value)s.')
class RangeMinValueValidator(MinValueValidator):
def compare(self, a, b):
return a.lower is None or a.lower < b
message = _('Ensure that this range is completely greater than or equal to %(limit_value)s.')
| bsd-3-clause | 3,792,180,124,299,452,000 | 34.197368 | 97 | 0.613084 | false |
Hasimir/brython | www/src/Lib/test/test_sched.py | 23 | 6660 | #!/usr/bin/env python
import queue
import sched
import time
import unittest
from test import support
try:
import threading
except ImportError:
threading = None
TIMEOUT = 10
class Timer:
def __init__(self):
self._cond = threading.Condition()
self._time = 0
self._stop = 0
def time(self):
with self._cond:
return self._time
# increase the time but not beyond the established limit
def sleep(self, t):
assert t >= 0
with self._cond:
t += self._time
while self._stop < t:
self._time = self._stop
self._cond.wait()
self._time = t
# advance time limit for user code
def advance(self, t):
assert t >= 0
with self._cond:
self._stop += t
self._cond.notify_all()
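# Note: Timer is a deterministic replacement for time.time/time.sleep, so a
# sched.scheduler(timer.time, timer.sleep) only makes progress when a test
# calls timer.advance(); this keeps the concurrent tests below independent of
# wall-clock timing.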
class TestCase(unittest.TestCase):
def test_enter(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.5, 0.4, 0.3, 0.2, 0.1]:
z = scheduler.enter(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.1, 0.2, 0.3, 0.4, 0.5])
def test_enterabs(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
scheduler.run()
self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_enter_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
scheduler.enter(1, 1, fun, (1,))
scheduler.enter(3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
for x in [4, 5, 2]:
z = scheduler.enter(x - 1, 1, fun, (x,))
timer.advance(2)
self.assertEqual(q.get(timeout=TIMEOUT), 2)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 5)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 5)
def test_priority(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for priority in [1, 2, 3, 4, 5]:
z = scheduler.enterabs(0.01, priority, fun, (priority,))
scheduler.run()
self.assertEqual(l, [1, 2, 3, 4, 5])
def test_cancel(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
event1 = scheduler.enterabs(now + 0.01, 1, fun, (0.01,))
event2 = scheduler.enterabs(now + 0.02, 1, fun, (0.02,))
event3 = scheduler.enterabs(now + 0.03, 1, fun, (0.03,))
event4 = scheduler.enterabs(now + 0.04, 1, fun, (0.04,))
event5 = scheduler.enterabs(now + 0.05, 1, fun, (0.05,))
scheduler.cancel(event1)
scheduler.cancel(event5)
scheduler.run()
self.assertEqual(l, [0.02, 0.03, 0.04])
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_cancel_concurrent(self):
q = queue.Queue()
fun = q.put
timer = Timer()
scheduler = sched.scheduler(timer.time, timer.sleep)
now = timer.time()
event1 = scheduler.enterabs(now + 1, 1, fun, (1,))
event2 = scheduler.enterabs(now + 2, 1, fun, (2,))
event4 = scheduler.enterabs(now + 4, 1, fun, (4,))
event5 = scheduler.enterabs(now + 5, 1, fun, (5,))
event3 = scheduler.enterabs(now + 3, 1, fun, (3,))
t = threading.Thread(target=scheduler.run)
t.start()
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 1)
self.assertTrue(q.empty())
scheduler.cancel(event2)
scheduler.cancel(event5)
timer.advance(1)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 3)
self.assertTrue(q.empty())
timer.advance(1)
self.assertEqual(q.get(timeout=TIMEOUT), 4)
self.assertTrue(q.empty())
timer.advance(1000)
t.join(timeout=TIMEOUT)
self.assertFalse(t.is_alive())
self.assertTrue(q.empty())
self.assertEqual(timer.time(), 4)
def test_empty(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
self.assertTrue(scheduler.empty())
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
z = scheduler.enterabs(x, 1, fun, (x,))
self.assertFalse(scheduler.empty())
scheduler.run()
self.assertTrue(scheduler.empty())
def test_queue(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
now = time.time()
e5 = scheduler.enterabs(now + 0.05, 1, fun)
e1 = scheduler.enterabs(now + 0.01, 1, fun)
e2 = scheduler.enterabs(now + 0.02, 1, fun)
e4 = scheduler.enterabs(now + 0.04, 1, fun)
e3 = scheduler.enterabs(now + 0.03, 1, fun)
# queue property is supposed to return an order list of
# upcoming events
self.assertEqual(scheduler.queue, [e1, e2, e3, e4, e5])
def test_args_kwargs(self):
flag = []
def fun(*a, **b):
flag.append(None)
self.assertEqual(a, (1,2,3))
self.assertEqual(b, {"foo":1})
scheduler = sched.scheduler(time.time, time.sleep)
z = scheduler.enterabs(0.01, 1, fun, argument=(1,2,3), kwargs={"foo":1})
scheduler.run()
self.assertEqual(flag, [None])
def test_run_non_blocking(self):
l = []
fun = lambda x: l.append(x)
scheduler = sched.scheduler(time.time, time.sleep)
for x in [10, 9, 8, 7, 6]:
scheduler.enter(x, 1, fun, (x,))
scheduler.run(blocking=False)
self.assertEqual(l, [])
def test_main():
support.run_unittest(TestCase)
if __name__ == "__main__":
test_main()
| bsd-3-clause | 6,685,348,758,801,012,000 | 31.647059 | 80 | 0.555856 | false |
t794104/ansible | test/units/modules/network/junos/test_junos_command.py | 68 | 6199 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from lxml.etree import fromstring
except ImportError:
from xml.etree.ElementTree import fromstring
from units.compat.mock import patch
from ansible.modules.network.junos import junos_command
from units.modules.utils import set_module_args
from .junos_module import TestJunosModule, load_fixture
RPC_CLI_MAP = {
'get-software-information': 'show version'
}
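# Note: load_fixtures() below uses this mapping to turn an RPC tag such as
# get-software-information into the CLI command string from which the fixture
# filename is derived (spaces replaced with '_', plus the display format).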
class TestJunosCommandModule(TestJunosModule):
module = junos_command
def setUp(self):
super(TestJunosCommandModule, self).setUp()
self.mock_conn = patch('ansible.module_utils.network.junos.junos.Connection')
self.conn = self.mock_conn.start()
self.mock_netconf = patch('ansible.module_utils.network.junos.junos.NetconfConnection')
self.netconf_conn = self.mock_netconf.start()
self.mock_exec_rpc = patch('ansible.modules.network.junos.junos_command.exec_rpc')
self.exec_rpc = self.mock_exec_rpc.start()
self.mock_netconf_rpc = patch('ansible.module_utils.network.common.netconf.NetconfConnection')
self.netconf_rpc = self.mock_netconf_rpc.start()
self.mock_get_connection = patch('ansible.modules.network.junos.junos_command.get_connection')
self.get_connection = self.mock_get_connection.start()
self.mock_get_capabilities = patch('ansible.modules.network.junos.junos_command.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'network_api': 'netconf'}
def tearDown(self):
super(TestJunosCommandModule, self).tearDown()
self.mock_conn.stop()
self.mock_netconf.stop()
self.mock_get_capabilities.stop()
self.mock_netconf_rpc.stop()
self.mock_exec_rpc.stop()
self.mock_get_connection.stop()
def load_fixtures(self, commands=None, format='text', changed=False):
def load_from_file(*args, **kwargs):
element = fromstring(args[1])
if element.text:
path = str(element.text)
else:
path = RPC_CLI_MAP[str(element.tag)]
filename = path.replace(' ', '_')
filename = '%s_%s.txt' % (filename, format)
return load_fixture(filename)
self.exec_rpc.side_effect = load_from_file
def test_junos_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Hostname:'))
def test_junos_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Hostname:'))
def test_junos_command_wait_for(self):
wait_for = 'result[0] contains "Junos:"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_junos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.exec_rpc.call_count, 10)
def test_junos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.exec_rpc.call_count, 2)
def test_junos_command_match_any(self):
wait_for = ['result[0] contains "Junos:"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_junos_command_match_all(self):
wait_for = ['result[0] contains "Junos:"',
'result[0] contains "JUNOS Software Release"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_junos_command_match_all_failure(self):
wait_for = ['result[0] contains "Junos:"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
def test_junos_command_simple_json(self):
set_module_args(dict(commands=['show version'], display='json'))
result = self.execute_module(format='json')
self.assertEqual(len(result['stdout']), 1)
self.assertTrue("software-information" in result['stdout'][0])
def test_junos_command_simple_rpc_text(self):
set_module_args(dict(rpcs=['get-software-information'], display='text'))
result = self.execute_module(format='text')
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Hostname:'))
def test_junos_command_simple_rpc_json(self):
set_module_args(dict(rpcs=['get-software-information'], display='json'))
result = self.execute_module(format='json')
self.assertEqual(len(result['stdout']), 1)
self.assertTrue("software-information" in result['stdout'][0])
| gpl-3.0 | 6,169,681,198,293,533,000 | 40.326667 | 106 | 0.661236 | false |
laayis/yowsup | yowsup/demos/echoclient/layer.py | 60 | 1646 | from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
class EchoLayer(YowInterfaceLayer):
@ProtocolEntityCallback("message")
def onMessage(self, messageProtocolEntity):
if messageProtocolEntity.getType() == 'text':
self.onTextMessage(messageProtocolEntity)
elif messageProtocolEntity.getType() == 'media':
self.onMediaMessage(messageProtocolEntity)
self.toLower(messageProtocolEntity.forward(messageProtocolEntity.getFrom()))
self.toLower(messageProtocolEntity.ack())
self.toLower(messageProtocolEntity.ack(True))
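    # Note: onMessage() echoes the entity back to its sender via forward(), then
    # sends the delivery receipt (ack()) and the read receipt (ack(True)).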
@ProtocolEntityCallback("receipt")
def onReceipt(self, entity):
self.toLower(entity.ack())
def onTextMessage(self,messageProtocolEntity):
# just print info
print("Echoing %s to %s" % (messageProtocolEntity.getBody(), messageProtocolEntity.getFrom(False)))
def onMediaMessage(self, messageProtocolEntity):
# just print info
if messageProtocolEntity.getMediaType() == "image":
print("Echoing image %s to %s" % (messageProtocolEntity.url, messageProtocolEntity.getFrom(False)))
elif messageProtocolEntity.getMediaType() == "location":
print("Echoing location (%s, %s) to %s" % (messageProtocolEntity.getLatitude(), messageProtocolEntity.getLongitude(), messageProtocolEntity.getFrom(False)))
elif messageProtocolEntity.getMediaType() == "vcard":
print("Echoing vcard (%s, %s) to %s" % (messageProtocolEntity.getName(), messageProtocolEntity.getCardData(), messageProtocolEntity.getFrom(False)))
| gpl-3.0 | 2,313,680,449,194,412,500 | 46.028571 | 168 | 0.702309 | false |
dgarros/ansible | lib/ansible/modules/cloud/cloudstack/cs_portforward.py | 51 | 14301 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
- Name of virtual machine which we make the port forwarding rule for.
- Required if C(state=present).
required: false
default: null
state:
description:
- State of the port forwarding rule.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
protocol:
description:
- Protocol of the port forwarding rule.
required: false
default: 'tcp'
choices: [ 'tcp', 'udp' ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
- If not specified equal C(public_port).
required: false
default: null
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
- If not specified equal C(private_port).
required: false
default: null
open_firewall:
description:
- Whether the firewall rule for public port should be created, while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
required: false
default: false
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
required: false
default: false
network:
description:
- Name of the network.
required: false
default: null
version_added: "2.3"
vpc:
description:
- Name of the VPC.
required: false
default: null
version_added: "2.3"
domain:
description:
- Domain the C(vm) is related to.
required: false
default: null
account:
description:
- Account the C(vm) is related to.
required: false
default: null
project:
description:
- Name of the project the C(vm) is located in.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
aliases: [ 'tag' ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# 1.2.3.4:80 -> web01:8080
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
# forward SSH and open firewall
- local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
# forward DNS traffic, but do not open firewall
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
# remove ssh port forwarding
- local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: string
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
vpc:
description: Name of the VPC.
returned: success
type: string
sample: my_vpc
network:
description: Name of the network.
returned: success
type: string
sample: dmz
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
# these values will be casted to int
self.returns_to_int = {
'publicport': 'public_port',
'publicendport': 'public_end_port',
'privateport': 'private_port',
'privateendport': 'private_end_port',
}
self.portforwarding_rule = None
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
public_end_port = self.get_or_fallback('public_end_port', 'public_port')
private_port = self.module.params.get('private_port')
private_end_port = self.get_or_fallback('private_end_port', 'private_port')
args = {}
args['ipaddressid'] = self.get_ip_address(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
portforwarding_rules = self.cs.listPortForwardingRules(**args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule')
            self.portforwarding_rule = portforwarding_rule
return portforwarding_rule
def create_portforwarding_rule(self):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['openfirewall'] = self.module.params.get('open_firewall')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['networkid'] = self.get_network(key='id')
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {}
args['protocol'] = self.module.params.get('protocol')
args['publicport'] = self.module.params.get('public_port')
args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
args['privateport'] = self.module.params.get('private_port')
args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
args['vmguestip'] = self.get_vm_guest_ip()
args['ipaddressid'] = self.get_ip_address(key='id')
args['virtualmachineid'] = self.get_vm(key='id')
args['networkid'] = self.get_network(key='id')
if self.has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
# API broken in 4.2.1?, workaround using remove/create instead of update
# portforwarding_rule = self.cs.updatePortForwardingRule(**args)
self.absent_portforwarding_rule()
portforwarding_rule = self.cs.createPortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {}
args['id'] = portforwarding_rule['id']
if not self.module.check_mode:
res = self.cs.deletePortForwardingRule(**args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
network_name = self.get_network(key='name')
if network_name:
self.result['network'] = network_name
vpc_name = self.get_vpc(key='name')
if vpc_name:
self.result['vpc'] = vpc_name
if portforwarding_rule:
for search_key, return_key in self.returns_to_int.items():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
protocol= dict(choices=['tcp', 'udp'], default='tcp'),
public_port = dict(type='int', required=True),
public_end_port = dict(type='int', default=None),
private_port = dict(type='int', required=True),
private_end_port = dict(type='int', default=None),
state = dict(choices=['present', 'absent'], default='present'),
open_firewall = dict(type='bool', default=False),
vm_guest_ip = dict(default=None),
vm = dict(default=None),
vpc = dict(default=None),
network = dict(default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag'], default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | -3,636,840,072,768,961,500 | 31.795872 | 116 | 0.618645 | false |
Sarah-Alsinan/muypicky | lib/python3.6/site-packages/pip/_vendor/pkg_resources/__init__.py | 320 | 103230 | # coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import tempfile
import textwrap
import itertools
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
if (3, 0) < sys.version_info < (3, 3):
msg = (
"Support for Python 3.0-3.2 has been dropped. Future versions "
"will fail here."
)
warnings.warn(msg)
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
class _SetuptoolsVersionMixin(object):
def __hash__(self):
return super(_SetuptoolsVersionMixin, self).__hash__()
def __lt__(self, other):
if isinstance(other, tuple):
return tuple(self) < other
else:
return super(_SetuptoolsVersionMixin, self).__lt__(other)
def __le__(self, other):
if isinstance(other, tuple):
return tuple(self) <= other
else:
return super(_SetuptoolsVersionMixin, self).__le__(other)
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
else:
return super(_SetuptoolsVersionMixin, self).__eq__(other)
def __ge__(self, other):
if isinstance(other, tuple):
return tuple(self) >= other
else:
return super(_SetuptoolsVersionMixin, self).__ge__(other)
def __gt__(self, other):
if isinstance(other, tuple):
return tuple(self) > other
else:
return super(_SetuptoolsVersionMixin, self).__gt__(other)
def __ne__(self, other):
if isinstance(other, tuple):
return tuple(self) != other
else:
return super(_SetuptoolsVersionMixin, self).__ne__(other)
def __getitem__(self, key):
return tuple(self)[key]
def __iter__(self):
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
# pad for numeric comparison
yield part.zfill(8)
else:
yield '*' + part
# ensure that alpha/beta/candidate are before final
yield '*final'
def old_parse_version(s):
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
# remove '-' before a prerelease tag
if part < '*final':
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
# Warn for use of this function
warnings.warn(
"You have iterated over the result of "
"pkg_resources.parse_version. This is a legacy behavior which is "
"inconsistent with the new version class introduced in setuptools "
"8.0. In most cases, conversion to a tuple is unnecessary. For "
"comparison of versions, sort the Version instances directly. If "
"you have another use case requiring the tuple, please file a "
"bug with the setuptools project describing that need.",
RuntimeWarning,
stacklevel=1,
)
for part in old_parse_version(str(self)):
yield part
class SetuptoolsVersion(_SetuptoolsVersionMixin, packaging.version.Version):
pass
class SetuptoolsLegacyVersion(_SetuptoolsVersionMixin,
packaging.version.LegacyVersion):
pass
def parse_version(v):
try:
return SetuptoolsVersion(v)
except packaging.version.InvalidVersion:
return SetuptoolsLegacyVersion(v)
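# Usage sketch (not part of the upstream module): parse_version() returns
# totally ordered version objects, so results can be compared directly; strings
# that are not valid PEP 440 versions fall back to the legacy class.
#
#     assert parse_version('1.9.0') < parse_version('1.10.0')
#     assert parse_version('2.0.dev1') < parse_version('2.0')
#     parse_version('not.a.release')   # SetuptoolsLegacyVersion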
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
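# Usage sketch (not part of the upstream module; 'somepkg' is a hypothetical
# project name): these are the exceptions callers of require()/resolve()
# usually handle, and report() renders the templates above.
#
#     try:
#         working_set.require('somepkg>=9999')
#     except (DistributionNotFound, VersionConflict) as exc:
#         print(exc.report())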
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = sys.version[:3]
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
try:
# Python 2.7 or >=3.2
from sysconfig import get_platform
except ImportError:
from distutils.util import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
_macosx_arch(machine))
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
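# Usage sketch (not part of the upstream module): the Mac OS X handling above
# accepts eggs built for an older OS release on a newer one, but not the
# reverse.
#
#     assert compatible_platforms('macosx-10.9-x86_64', 'macosx-10.12-x86_64')
#     assert not compatible_platforms('macosx-10.12-x86_64', 'macosx-10.9-x86_64')
#     assert compatible_platforms(None, get_supported_platform())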
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
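# Usage sketch (not part of the upstream module): get_distribution() accepts a
# project name, a requirement string, or a Requirement object and returns the
# active Distribution, whose metadata can then be inspected.
#
#     dist = get_distribution('setuptools')
#     dist.version, dist.location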
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
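# Usage sketch (not part of the upstream module; 'pip' is simply an example of
# an installed project that declares console_scripts):
#
#     get_entry_map('pip', 'console_scripts')           # {name: EntryPoint}
#     get_entry_info('pip', 'console_scripts', 'pip')   # EntryPoint or None
#     main = load_entry_point('pip', 'console_scripts', 'pip')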
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
for dist in self:
entries = dist.get_entry_map(group)
if name is None:
for ep in entries.values():
yield ep
elif name in entries:
yield entries[name]
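    # Usage sketch (not part of the upstream module; the group name
    # 'myapp.plugins' is hypothetical): iter_entry_points() is the usual
    # plugin-discovery hook.
    #
    #     for ep in working_set.iter_entry_points('myapp.plugins'):
    #         plugin = ep.load()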
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
Unless `replace_conflicting=True`, raises a VersionConflict exception if
any requirements are found on the path that have the correct name but
the wrong version. Otherwise, if an `installer` is supplied it will be
invoked to obtain the correct version of the requirement and activate
it.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(req, ws, installer)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
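    # Usage sketch (not part of the upstream module; 'somepkg' is a
    # hypothetical project name): resolve() is normally fed Requirement
    # objects produced by parse_requirements().
    #
    #     reqs = parse_requirements('somepkg>=1.0')
    #     for dist in working_set.resolve(reqs, Environment()):
    #         working_set.add(dist)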
def find_plugins(self, plugin_env, full_env=None, installer=None,
fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions.  If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
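    # Usage sketch (not part of the upstream module): require() wraps resolve()
    # and add(); on the global working set the activated distributions also
    # become importable.
    #
    #     working_set.require('setuptools>=0.7')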
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
        If `existing=True` (default), the callback is also invoked for
        all distributions already in the working set.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (None,)
)
return not req.marker or any(extra_evals)
class Environment(object):
"""Searchable snapshot of distributions on a search path"""
def __init__(self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.3'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
return (self.python is None or dist.py_version is None
or dist.py_version == self.python) \
and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(self, req, working_set, installer=None):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
dist = working_set.find(req)
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
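    # Usage sketch (not part of the upstream module; the directory and project
    # names are hypothetical): an Environment is how resolve() finds candidates
    # outside the working set, e.g. a plugin directory.
    #
    #     env = Environment(['plugins'])
    #     dist = env.best_match(Requirement.parse('somepkg'), working_set)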
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
# XXX backward compatibility
AvailableDistributions = Environment
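# Usage sketch (illustrative; not part of the original module). Shows how the
# Environment defined above can be scanned and queried; 'setuptools' is only a
# sample project name and may or may not be present on sys.path.
def _example_environment_usage():
    env = Environment()  # scans sys.path by default
    return [(dist.project_name, dist.version) for dist in env['setuptools']]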
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
{old_exc}
The Python egg cache directory is currently set to:
{cache_path}
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""").lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = ("%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
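# Usage sketch (illustrative; not part of the original module). Demonstrates
# the ResourceManager API defined above; the package name 'mypkg' and the
# resource path 'data/config.txt' are hypothetical.
def _example_resource_api():
    manager = ResourceManager()
    if manager.resource_exists('mypkg', 'data/config.txt'):
        return manager.resource_string('mypkg', 'data/config.txt')
    return None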
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
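# Usage sketch (illustrative; not part of the original module). Expected
# results for the normalization helpers defined above.
def _example_name_normalization():
    return {
        'safe_name': safe_name('my project'),            # 'my-project'
        'safe_version': safe_version('not a version'),   # 'not.a.version'
        'safe_extra': safe_extra('Dev Tools'),            # 'dev_tools'
        'to_filename': to_filename('my-project'),         # 'my_project'
    }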
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
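# Usage sketch (illustrative; not part of the original module). invalid_marker
# returns False for a valid PEP 508 marker and a SyntaxError instance for an
# invalid one.
def _example_marker_validation():
    assert invalid_marker('python_version >= "2.7"') is False
    err = invalid_marker('this is not a marker')
    return err  # a SyntaxError describing the failure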
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def has_metadata(self, name):
return self.egg_info and self._has(self._fn(self.egg_info, name))
def get_metadata(self, name):
if not self.egg_info:
return ""
value = self._get(self._fn(self.egg_info, name))
return value.decode('utf-8') if six.PY3 else value
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError("No script named %r" % script_name)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_unpacked_egg(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_cls = getattr(importlib_machinery, 'SourceFileLoader',
type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
_isdir = _has = lambda self, path: False
_get = lambda self, path: ''
_listdir = lambda self, path: []
module_path = None
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with ContextualZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ContextualZipFile(zipfile.ZipFile):
"""
Supplement ZipFile class to support context manager for Python 2.6
"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""
Construct a ZipFile or ContextualZipFile as appropriate
"""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.6 and 3.2 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if _is_unpacked_egg(subitem):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
else:
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(os.listdir(path_item))
for entry in path_item_entries:
lower = entry.lower()
if lower.endswith('.egg-info') or lower.endswith('.dist-info'):
fullpath = os.path.join(path_item, entry)
if os.path.isdir(fullpath):
# egg-info directory, allow getting metadata
if len(os.listdir(fullpath)) == 0:
# Empty egg directory, skip.
continue
metadata = PathMetadata(path_item, fullpath)
else:
metadata = FileMetadata(fullpath)
yield Distribution.from_location(
path_item, entry, metadata, precedence=DEVELOP_DIST
)
elif not only and _is_unpacked_egg(entry):
dists = find_distributions(os.path.join(path_item, entry))
for dist in dists:
yield dist
elif not only and lower.endswith('.egg-link'):
with open(os.path.join(path_item, entry)) as entry_file:
entry_lines = entry_file.readlines()
for line in entry_lines:
if not line.strip():
continue
path = os.path.join(path_item, line.rstrip())
dists = find_distributions(path)
for item in dists:
yield item
break
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
orig_path.sort(key=position_in_sys_path)
module.__path__[:] = [_normalize_cached(p) for p in orig_path]
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(filename))
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
path.lower().endswith('.egg')
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint(object):
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
DeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
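# Usage sketch (illustrative; not part of the original module). The entry
# point string below uses hypothetical names ('my-tool', 'mypkg.cli').
def _example_entry_point_parse():
    ep = EntryPoint.parse('my-tool = mypkg.cli:main [extra1]')
    # ep.name == 'my-tool', ep.module_name == 'mypkg.cli',
    # ep.attrs == ('main',), ep.extras == ('extra1',)
    return ep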
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
is_version_line = lambda line: line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
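# Usage sketch (illustrative; not part of the original module). Feeding
# PKG-INFO style lines through _version_from_file yields the Version field.
def _example_version_from_file():
    lines = ['Metadata-Version: 1.1', 'Name: sample', 'Version: 1.0']
    return _version_from_file(lines)  # '1.0'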
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = _version_from_file(self._get_metadata(self.PKG_INFO))
if version is None:
tmpl = "Missing 'Version:' header and/or %s file"
raise ValueError(tmpl % self.PKG_INFO, self)
return version
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
dm = self.__dep_map = {None: []}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
if extra:
if ':' in extra:
extra, marker = extra.split(':', 1)
if invalid_marker(marker):
# XXX warn
reqs = []
elif not evaluate_marker(marker):
reqs = []
extra = safe_extra(extra) or None
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
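# Usage sketch (illustrative; not part of the original module). Parses a
# hypothetical egg file name into distribution metadata via EGG_NAME.
def _example_distribution_from_egg_name():
    dist = Distribution.from_location('/tmp/eggs', 'FooBar-1.2-py2.7.egg')
    # dist.project_name == 'FooBar', dist.version == '1.2',
    # dist.py_version == '2.7', dist.key == 'foobar'
    return dist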
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use an old
        safe_version, so their version numbers can get mangled when
        converted to filenames (e.g., 1.11.0.dev0+2329eae to
        1.11.0.dev0_2329eae). Such distributions will not be parsed
        properly downstream by Distribution and safe_version, so take an
        extra step and try to get the version number from the metadata
        file itself instead of the filename.
"""
md_version = _version_from_file(self._get_metadata(self.PKG_INFO))
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""Wrap an actual or potential sys.path entry w/metadata, .dist-info style"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
line += next(lines)
yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self): return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
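# Usage sketch (illustrative; not part of the original module). 'requests' is
# only a sample project name.
def _example_requirement_parse():
    req = Requirement.parse('requests>=2.0')
    # req.key == 'requests', req.specs == [('>=', '2.0')]
    # Version strings can be tested for containment directly:
    return '2.5' in req  # True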
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object):
pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
mkdir(dirname, 0o755)
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
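# Usage sketch (illustrative; not part of the original module). The section
# and entry names below are hypothetical.
def _example_split_sections():
    text = """
    top-level line
    [console_scripts]
    my-tool = mypkg.cli:main
    """
    return list(split_sections(text))
    # -> [(None, ['top-level line']),
    #     ('console_scripts', ['my-tool = mypkg.cli:main'])]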
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
dist = None # ensure dist is defined for del dist below
for dist in working_set:
dist.activate(replace=False)
del dist
add_activation_listener(lambda dist: dist.activate(replace=True), existing=False)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
| mit | 7,756,767,067,461,531,000 | 32.823067 | 91 | 0.59819 | false |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/Django-1.6.10/tests/one_to_one_regress/tests.py | 107 | 8452 | from __future__ import absolute_import
from django.test import TestCase
from .models import Place, Restaurant, Bar, Favorites, Target, UndergroundBar
class OneToOneRegressionTests(TestCase):
def setUp(self):
self.p1 = Place(name='Demon Dogs', address='944 W. Fullerton')
self.p1.save()
self.r1 = Restaurant(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.r1.save()
self.b1 = Bar(place=self.p1, serves_cocktails=False)
self.b1.save()
def test_reverse_relationship_cache_cascade(self):
"""
Regression test for #9023: accessing the reverse relationship shouldn't
result in a cascading delete().
"""
bar = UndergroundBar.objects.create(place=self.p1, serves_cocktails=False)
# The bug in #9023: if you access the one-to-one relation *before*
# setting to None and deleting, the cascade happens anyway.
self.p1.undergroundbar
        bar.place.name = 'foo'
bar.place = None
bar.save()
self.p1.delete()
self.assertEqual(Place.objects.all().count(), 0)
self.assertEqual(UndergroundBar.objects.all().count(), 1)
def test_create_models_m2m(self):
"""
Regression test for #1064 and #1506
Check that we create models via the m2m relation if the remote model
has a OneToOneField.
"""
        f = Favorites(name='Fred')
f.save()
f.restaurants = [self.r1]
self.assertQuerysetEqual(
f.restaurants.all(),
['<Restaurant: Demon Dogs the restaurant>']
)
def test_reverse_object_cache(self):
"""
Regression test for #7173
Check that the name of the cache for the reverse object is correct.
"""
self.assertEqual(self.p1.restaurant, self.r1)
self.assertEqual(self.p1.bar, self.b1)
def test_related_object_cache(self):
""" Regression test for #6886 (the related-object cache) """
# Look up the objects again so that we get "fresh" objects
p = Place.objects.get(name="Demon Dogs")
r = p.restaurant
# Accessing the related object again returns the exactly same object
self.assertTrue(p.restaurant is r)
# But if we kill the cache, we get a new object
del p._restaurant_cache
self.assertFalse(p.restaurant is r)
# Reassigning the Restaurant object results in an immediate cache update
# We can't use a new Restaurant because that'll violate one-to-one, but
# with a new *instance* the is test below will fail if #6886 regresses.
r2 = Restaurant.objects.get(pk=r.pk)
p.restaurant = r2
self.assertTrue(p.restaurant is r2)
# Assigning None succeeds if field is null=True.
ug_bar = UndergroundBar.objects.create(place=p, serves_cocktails=False)
ug_bar.place = None
self.assertTrue(ug_bar.place is None)
# Assigning None fails: Place.restaurant is null=False
self.assertRaises(ValueError, setattr, p, 'restaurant', None)
# You also can't assign an object of the wrong type here
self.assertRaises(ValueError, setattr, p, 'restaurant', p)
# Creation using keyword argument should cache the related object.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place=p)
self.assertTrue(r.place is p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Place()
r = Restaurant(place=p)
self.assertTrue(r.place is p)
# Creation using attname keyword argument and an id will cause the related
# object to be fetched.
p = Place.objects.get(name="Demon Dogs")
r = Restaurant(place_id=p.id)
self.assertFalse(r.place is p)
self.assertEqual(r.place, p)
def test_filter_one_to_one_relations(self):
"""
Regression test for #9968
filtering reverse one-to-one relations with primary_key=True was
misbehaving. We test both (primary_key=True & False) cases here to
prevent any reappearance of the problem.
"""
t = Target.objects.create()
self.assertQuerysetEqual(
Target.objects.filter(pointer=None),
['<Target: Target object>']
)
self.assertQuerysetEqual(
Target.objects.exclude(pointer=None),
[]
)
self.assertQuerysetEqual(
Target.objects.filter(pointer2=None),
['<Target: Target object>']
)
self.assertQuerysetEqual(
Target.objects.exclude(pointer2=None),
[]
)
def test_reverse_object_does_not_exist_cache(self):
"""
Regression for #13839 and #17439.
DoesNotExist on a reverse one-to-one relation is cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
with self.assertNumQueries(1):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
with self.assertNumQueries(0):
with self.assertRaises(Restaurant.DoesNotExist):
p.restaurant
def test_reverse_object_cached_when_related_is_accessed(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is cached
when the origin is accessed through the reverse relation.
"""
# Use a fresh object without caches
r = Restaurant.objects.get(pk=self.r1.pk)
p = r.place
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, r)
def test_related_object_cached_when_reverse_is_accessed(self):
"""
Regression for #13839 and #17439.
The origin of a one-to-one relation is cached
when the target is accessed through the reverse relation.
"""
# Use a fresh object without caches
p = Place.objects.get(pk=self.p1.pk)
r = p.restaurant
with self.assertNumQueries(0):
self.assertEqual(r.place, p)
def test_reverse_object_cached_when_related_is_set(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
p = Place(name='Zombie Cats', address='Not sure')
p.save()
self.r1.place = p
self.r1.save()
with self.assertNumQueries(0):
self.assertEqual(p.restaurant, self.r1)
def test_reverse_object_cached_when_related_is_unset(self):
"""
Regression for #13839 and #17439.
The target of a one-to-one relation is always cached.
"""
b = UndergroundBar(place=self.p1, serves_cocktails=True)
b.save()
with self.assertNumQueries(0):
self.assertEqual(self.p1.undergroundbar, b)
b.place = None
b.save()
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
self.p1.undergroundbar
def test_get_reverse_on_unsaved_object(self):
"""
Regression for #18153 and #19089.
Accessing the reverse relation on an unsaved object
always raises an exception.
"""
p = Place()
# When there's no instance of the origin of the one-to-one
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
UndergroundBar.objects.create()
# When there's one instance of the origin
# (p.undergroundbar used to return that instance)
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
UndergroundBar.objects.create()
# When there are several instances of the origin
with self.assertNumQueries(0):
with self.assertRaises(UndergroundBar.DoesNotExist):
p.undergroundbar
def test_set_reverse_on_unsaved_object(self):
"""
Writing to the reverse relation on an unsaved object
is impossible too.
"""
p = Place()
b = UndergroundBar.objects.create()
with self.assertNumQueries(0):
with self.assertRaises(ValueError):
p.undergroundbar = b
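# Illustrative sketch (not part of the original test module): the models
# imported from .models above live in a separate models.py. The shapes below
# are inferred from how the tests use them and are only an approximation;
# field lengths and related_name choices are assumptions.
#
#     from django.db import models
#
#     class Place(models.Model):
#         name = models.CharField(max_length=50)
#         address = models.CharField(max_length=80)
#
#     class Restaurant(models.Model):
#         place = models.OneToOneField(Place)
#         serves_hot_dogs = models.BooleanField(default=False)
#         serves_pizza = models.BooleanField(default=False)
#
#     class Bar(models.Model):
#         place = models.OneToOneField(Place)
#         serves_cocktails = models.BooleanField(default=False)
#
#     class UndergroundBar(models.Model):
#         place = models.OneToOneField(Place, null=True)
#         serves_cocktails = models.BooleanField(default=False)
#
#     class Favorites(models.Model):
#         name = models.CharField(max_length=50)
#         restaurants = models.ManyToManyField(Restaurant)
#
#     class Target(models.Model):
#         pass
#
#     class Pointer(models.Model):
#         other = models.OneToOneField(Target, primary_key=True,
#                                      related_name='pointer')
#
#     class Pointer2(models.Model):
#         other = models.OneToOneField(Target, related_name='pointer2')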
| apache-2.0 | -6,940,051,416,413,765,000 | 33.639344 | 85 | 0.609915 | false |