Dataset columns: repo_name (string, lengths 5-92), path (string, lengths 4-232), copies (string, 19 classes), size (string, lengths 4-7), content (string, lengths 721-1.04M), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51-99.9), line_max (int64, 15-997), alpha_frac (float64, 0.25-0.97), autogenerated (bool, 1 class).
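Each row below pairs one file's metadata with its raw source text, in the column order listed above. As a rough illustration of how rows with this schema might be filtered, here is a minimal sketch; the in-memory `rows` list and the `keep_row` helper are hypothetical stand-ins for however the dataset is actually loaded, and only the column names come from the schema above.

```python
# Minimal sketch (hypothetical): filter rows using the columns described above.
# The `rows` list stands in for the loaded dataset; only the column names
# follow the schema, and the sample values are taken from the first row below.
rows = [
    {"repo_name": "adrn/ebak", "path": "ebak/singleline/test_data.py",
     "license": "mit", "autogenerated": False, "line_max": 82,
     "alpha_frac": 0.621289},
]

def keep_row(row, allowed_licenses=("mit",), max_line_len=200):
    """Keep non-autogenerated files under an allowed license with sane line lengths."""
    return (row["license"] in allowed_licenses
            and not row["autogenerated"]
            and row["line_max"] <= max_line_len)

kept = [r for r in rows if keep_row(r)]
print(f"kept {len(kept)} of {len(rows)} rows")
```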
adrn/ebak | ebak/singleline/test_data.py | 1 | 1785 |

from __future__ import division, print_function

__author__ = "adrn <[email protected]>"

# Standard library
from os.path import exists, join

# Third-party
import astropy.time as atime
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
import pytest

from .data import RVData

def test_rvdata():
    # test various initializations
    t = np.random.uniform(55555., 56012., size=1024)
    rv = 100 * np.sin(0.5*t) * u.km/u.s
    ivar = 1 / (np.random.normal(0,5,size=1024)*u.km/u.s)**2
    RVData(t=t, rv=rv, ivar=ivar)

    t = atime.Time(t, format='mjd', scale='utc')
    RVData(t=t, rv=rv, ivar=ivar)

    with pytest.raises(TypeError):
        RVData(t=t, rv=rv.value, ivar=ivar)

    with pytest.raises(TypeError):
        RVData(t=t, rv=rv, ivar=ivar.value)

    # check that copy works
    t = atime.Time(t, format='mjd', scale='utc')
    data1 = RVData(t=t, rv=rv, ivar=ivar)
    data2 = data1.copy()

    data1._t *= 1.5
    data1._rv *= 1.5
    data1._ivar *= 1.5

    assert np.all(data2._t != data1._t)
    assert np.all(data2._rv != data1._rv)
    assert np.all(data2._ivar != data1._ivar)

    # check that plotting at least succeeds (TODO: could be better)
    data1.plot()
    data1.plot(color='r')
    data1.plot(ax=plt.gca())

    # try classmethod
    _basepath = '/Users/adrian/projects/ebak'
    if exists(_basepath):
        print("running classmethod test")
        apogee_id = "2M03080601+7950502"
        data = RVData.from_apogee(join(_basepath, 'data', 'allVisit-l30e.2.fits'),
                                  apogee_id=apogee_id)

        from astropy.io import fits
        d = fits.getdata(join(_basepath, 'data', 'allVisit-l30e.2.fits'), 1)
        data = RVData.from_apogee(d[d['APOGEE_ID'].astype(str) == apogee_id])
| mit | -6,489,791,981,863,390,000 | 27.790323 | 82 | 0.621289 | false |
danaukes/popupcad | dev_tools/hierarchy.py | 2 | 4333 |

# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""

#from pynamics.tree_node import TreeNode
from dev_tools.acyclicdirectedgraph import Node,AcyclicDirectedGraph
import random
#import numpy
#import yaml

def level(connections):
    return [item.level for item in connections]

def overlaps(connections,connection):
    return [connection.overlaps(item) for item in connections]

def max_overlapping_levels(connections,connection):
    return max([item.level for item in connections if connection.overlaps(item)])

def num_levels(connections):
    levels = [c.level for c in connections]
    num_levels = len(set(levels))
    return num_levels

class Operation(Node):
    def __init__(self,name,*args,**kwargs):
        super(Operation,self).__init__(*args,**kwargs)
        self.name=name

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

class Connection(object):
    def __init__(self,list1,parent,child):
        self.list1 = list1
        self.parent = parent
        self.child = child

    @property
    def ii(self):
        return self.list1.index(self.parent)

    @property
    def jj(self):
        return self.list1.index(self.child)

    @property
    def hops(self):
        return self.jj-self.ii

    def get_level(self):
        try:
            return self._level
        except AttributeError:
            self._level = -1
            return self._level

    def set_level(self,level):
        self._level = level

    level = property(get_level,set_level)

    def __str__(self):
        return '{0} --> {1}'.format(self.parent,self.child)

    def __repr__(self):
        return str(self)

    def segments(self):
        a = range(self.ii,self.jj)
        b = range(self.ii+1,self.jj+1)
        e = [tuple(item) for item in zip(a,b)]
        return e

    def overlapped_items(self):
        a=list(range(self.ii+1,self.jj))
        return a

    def overlapping_segments(self,other):
        my_segments = set(self.segments())
        other_segments = set(other.segments())
        return my_segments.intersection(other_segments)

    def overlaps(self,other):
        return not not self.overlapping_segments(other)

def create_sorted_connections(list_in, get_children):
    connections = []
    for ii,operation in enumerate(list_in):
        for child in get_children(operation):
            connections.append(Connection(list_in,operation,child))
    connections.sort(key=lambda item:(item.hops,item.ii))
    for connection in connections:
        connection.set_level(max_overlapping_levels(connections,connection)+1)
    return connections

if __name__=='__main__':
    num_operations = 10
    #
    # operations = []
    # for item in range(num_operations):
    # operation = Operation('item '+str(item))
    # operations.append(operation)
    #
    # connection_list = []
    # for ii,operation in enumerate(operations[:-1]):
    ## operation.add_branch(operations[ii+1])
    # connection_list.append((operation,operations[ii+1]))
    #
    #
    # num_extra_connections = 10
    #
    # extras = []
    # for ii in range(num_extra_connections):
    # a = random.randint(0,num_operations-2)
    # b = random.randint(a+1,num_operations-1)
    # extras.append((a,b))
    ## operations[a].add_branch(operations[b])
    # connection_list.append((operations[a],operations[b]))
    #
    # network = AcyclicDirectedGraph(operations,connection_list)
    # ----------------------------------------------------------

    operations = list(range(num_operations))

    connections = {}
    for item in operations:
        connections[item]=[]

    connections[0].append(1)
    connections[0].append(4)
    connections[0].append(5)
    connections[2].append(6)

    connections = create_sorted_connections(operations,lambda item:connections[item])

    A = [[' ']*num_levels(connections) for ii in range(num_operations)]
    for c in connections:
        A[c.ii][c.level]='*'
        A[c.jj][c.level]='*'
        #
        for kk in c.overlapped_items():
            A[kk][c.level]='|'
    # #

    for item in A:
        string = ''.join(item)
        print(string)
    #
    #

| mit | 5,193,853,664,068,871,000 | 27.142857 | 85 | 0.603277 | false |
maxive/erp | addons/stock/tests/test_stock_flow.py | 1 | 107063 |

# -*- coding: utf-8 -*-
from odoo.addons.stock.tests.common import TestStockCommon
from odoo.tools import mute_logger, float_round
from odoo.exceptions import UserError
class TestStockFlow(TestStockCommon):
@mute_logger('odoo.addons.base.models.ir_model', 'odoo.models')
def test_00_picking_create_and_transfer_quantity(self):
""" Basic stock operation on incoming and outgoing shipment. """
LotObj = self.env['stock.production.lot']
# ----------------------------------------------------------------------
# Create incoming shipment of product A, B, C, D
# ----------------------------------------------------------------------
# Product A ( 1 Unit ) , Product C ( 10 Unit )
# Product B ( 1 Unit ) , Product D ( 10 Unit )
# Product D ( 5 Unit )
# ----------------------------------------------------------------------
picking_in = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
move_a = self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 1,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
move_b = self.MoveObj.create({
'name': self.productB.name,
'product_id': self.productB.id,
'product_uom_qty': 1,
'product_uom': self.productB.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
move_c = self.MoveObj.create({
'name': self.productC.name,
'product_id': self.productC.id,
'product_uom_qty': 10,
'product_uom': self.productC.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
move_d = self.MoveObj.create({
'name': self.productD.name,
'product_id': self.productD.id,
'product_uom_qty': 10,
'product_uom': self.productD.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.productD.name,
'product_id': self.productD.id,
'product_uom_qty': 5,
'product_uom': self.productD.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
# Confirm incoming shipment.
picking_in.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# ----------------------------------------------------------------------
# Replace pack operation of incoming shipments.
# ----------------------------------------------------------------------
picking_in.action_assign()
move_a.move_line_ids.qty_done = 4
move_b.move_line_ids.qty_done = 5
move_c.move_line_ids.qty_done = 5
move_d.move_line_ids.qty_done = 5
lot2_productC = LotObj.create({'name': 'C Lot 2', 'product_id': self.productC.id})
self.StockPackObj.create({
'product_id': self.productC.id,
'qty_done': 2,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'move_id': move_c.id,
'lot_id': lot2_productC.id,
})
self.StockPackObj.create({
'product_id': self.productD.id,
'qty_done': 2,
'product_uom_id': self.productD.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'move_id': move_d.id
})
# Check incoming shipment total quantity of pack operation
total_qty = sum(self.StockPackObj.search([('move_id', 'in', picking_in.move_lines.ids)]).mapped('qty_done'))
self.assertEqual(total_qty, 23, 'Wrong quantity in pack operation')
# Transfer Incoming Shipment.
picking_in.action_done()
# ----------------------------------------------------------------------
# Check state, quantity and total moves of incoming shipment.
# ----------------------------------------------------------------------
# Check total number of move lines of the incoming shipment: the unprocessed move disappears from the original picking and goes into the backorder.
self.assertEqual(len(picking_in.move_lines), 4, 'Wrong number of move lines.')
# Check incoming shipment state.
self.assertEqual(picking_in.state, 'done', 'Incoming shipment state should be done.')
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
# Check product A done quantity must be 4
moves = self.MoveObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', picking_in.id)])
self.assertEqual(moves.product_uom_qty, 4.0, 'Wrong move quantity for product A.')
# Check product B done quantity must be 5
moves = self.MoveObj.search([('product_id', '=', self.productB.id), ('picking_id', '=', picking_in.id)])
self.assertEqual(moves.product_uom_qty, 5.0, 'Wrong move quantity for product B.')
# Check product C done quantity must be 7
c_done_qty = self.MoveObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', picking_in.id)], limit=1).product_uom_qty
self.assertEqual(c_done_qty, 7.0, 'Wrong move quantity of product C (%s found instead of 7)' % (c_done_qty))
# Check product D done quantity must be 7
d_done_qty = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', picking_in.id)], limit=1).product_uom_qty
self.assertEqual(d_done_qty, 7.0, 'Wrong move quantity of product D (%s found instead of 7)' % (d_done_qty))
# ----------------------------------------------------------------------
# Check Back order of Incoming shipment.
# ----------------------------------------------------------------------
# Check back order created or not.
back_order_in = self.PickingObj.search([('backorder_id', '=', picking_in.id)])
self.assertEqual(len(back_order_in), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(back_order_in.move_lines), 2, 'Wrong number of move lines.')
# Check back order should be created with 3 quantity of product C.
moves = self.MoveObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', back_order_in.id)])
product_c_qty = [move.product_uom_qty for move in moves]
self.assertEqual(sum(product_c_qty), 3.0, 'Wrong move quantity of product C (%s found instead of 3)' % (product_c_qty))
# Check back order should be created with 8 quantity of product D.
moves = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', back_order_in.id)])
product_d_qty = [move.product_uom_qty for move in moves]
self.assertEqual(sum(product_d_qty), 8.0, 'Wrong move quantity of product D (%s found instead of 8)' % (product_d_qty))
# ======================================================================
# Create Outgoing shipment with ...
# product A ( 10 Unit ) , product B ( 5 Unit )
# product C ( 3 unit ) , product D ( 10 Unit )
# ======================================================================
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
move_cust_a = self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 10,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
move_cust_b = self.MoveObj.create({
'name': self.productB.name,
'product_id': self.productB.id,
'product_uom_qty': 5,
'product_uom': self.productB.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
move_cust_c = self.MoveObj.create({
'name': self.productC.name,
'product_id': self.productC.id,
'product_uom_qty': 3,
'product_uom': self.productC.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
move_cust_d = self.MoveObj.create({
'name': self.productD.name,
'product_id': self.productD.id,
'product_uom_qty': 10,
'product_uom': self.productD.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
# Confirm outgoing shipment.
picking_out.action_confirm()
for move in picking_out.move_lines:
self.assertEqual(move.state, 'confirmed', 'Wrong state of move line.')
# Assign products to the outgoing shipment
picking_out.action_assign()
self.assertEqual(move_cust_a.state, 'partially_available', 'Wrong state of move line.')
self.assertEqual(move_cust_b.state, 'assigned', 'Wrong state of move line.')
self.assertEqual(move_cust_c.state, 'assigned', 'Wrong state of move line.')
self.assertEqual(move_cust_d.state, 'partially_available', 'Wrong state of move line.')
# Check availability for product A
aval_a_qty = self.MoveObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', picking_out.id)], limit=1).reserved_availability
self.assertEqual(aval_a_qty, 4.0, 'Wrong move quantity availability of product A (%s found instead of 4)' % (aval_a_qty))
# Check availability for product B
aval_b_qty = self.MoveObj.search([('product_id', '=', self.productB.id), ('picking_id', '=', picking_out.id)], limit=1).reserved_availability
self.assertEqual(aval_b_qty, 5.0, 'Wrong move quantity availability of product B (%s found instead of 5)' % (aval_b_qty))
# Check availability for product C
aval_c_qty = self.MoveObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', picking_out.id)], limit=1).reserved_availability
self.assertEqual(aval_c_qty, 3.0, 'Wrong move quantity availability of product C (%s found instead of 3)' % (aval_c_qty))
# Check availability for product D
aval_d_qty = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', picking_out.id)], limit=1).reserved_availability
self.assertEqual(aval_d_qty, 7.0, 'Wrong move quantity availability of product D (%s found instead of 7)' % (aval_d_qty))
# ----------------------------------------------------------------------
# Replace pack operation of outgoing shipment.
# ----------------------------------------------------------------------
move_cust_a.move_line_ids.qty_done = 2.0
move_cust_b.move_line_ids.qty_done = 3.0
self.StockPackObj.create({
'product_id': self.productB.id,
'qty_done': 2,
'product_uom_id': self.productB.uom_id.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location,
'move_id': move_cust_b.id})
# TODO: careful, if product_qty and lot_id are set at the same time the system does 2 unreserves.
move_cust_c.move_line_ids[0].write({
'qty_done': 2.0,
'lot_id': lot2_productC.id,
})
self.StockPackObj.create({
'product_id': self.productC.id,
'qty_done': 3.0,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location,
'move_id': move_cust_c.id})
move_cust_d.move_line_ids.qty_done = 6.0
# Transfer picking.
picking_out.action_done()
# ----------------------------------------------------------------------
# Check state, quantity and total moves of outgoing shipment.
# ----------------------------------------------------------------------
# check outgoing shipment status.
self.assertEqual(picking_out.state, 'done', 'Wrong state of outgoing shipment.')
# check outgoing shipment total moves and its state.
self.assertEqual(len(picking_out.move_lines), 4, 'Wrong number of move lines')
for move in picking_out.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
back_order_out = self.PickingObj.search([('backorder_id', '=', picking_out.id)])
# ------------------
# Check back order.
# -----------------
self.assertEqual(len(back_order_out), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(back_order_out.move_lines), 2, 'Wrong number of move lines')
# Check back order should be created with 8 quantity of product A.
product_a_qty = self.MoveObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', back_order_out.id)], limit=1).product_uom_qty
self.assertEqual(product_a_qty, 8.0, 'Wrong move quantity of product A (%s found instead of 8)' % (product_a_qty))
# Check back order should be created with 4 quantity of product D.
product_d_qty = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', back_order_out.id)], limit=1).product_uom_qty
self.assertEqual(product_d_qty, 4.0, 'Wrong move quantity of product D (%s found instead of 4)' % (product_d_qty))
# -----------------------------------------------------------------------
# Check stock location quant quantity and quantity available
# of product A, B, C, D
# -----------------------------------------------------------------------
# Check quants and available quantity for product A
quants = self.StockQuantObj.search([('product_id', '=', self.productA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 2.0, 'Expecting 2.0 Unit , got %.4f Unit on location stock!' % (sum(total_qty)))
self.assertEqual(self.productA.qty_available, 2.0, 'Wrong quantity available (%s found instead of 2.0)' % (self.productA.qty_available))
# Check quants and available quantity for product B
quants = self.StockQuantObj.search([('product_id', '=', self.productB.id), ('location_id', '=', self.stock_location)])
self.assertFalse(quants, 'No quant should found as outgoing shipment took everything out of stock.')
self.assertEqual(self.productB.qty_available, 0.0, 'Product B should have zero quantity available.')
# Check quants and available quantity for product C
quants = self.StockQuantObj.search([('product_id', '=', self.productC.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 2.0, 'Expecting 2.0 Unit, got %.4f Unit on location stock!' % (sum(total_qty)))
self.assertEqual(self.productC.qty_available, 2.0, 'Wrong quantity available (%s found instead of 2.0)' % (self.productC.qty_available))
# Check quants and available quantity for product D
quant = self.StockQuantObj.search([('product_id', '=', self.productD.id), ('location_id', '=', self.stock_location)], limit=1)
self.assertEqual(quant.quantity, 1.0, 'Expecting 1.0 Unit , got %.4f Unit on location stock!' % (quant.quantity))
self.assertEqual(self.productD.qty_available, 1.0, 'Wrong quantity available (%s found instead of 1.0)' % (self.productD.qty_available))
# -----------------------------------------------------------------------
# Back Order of Incoming shipment
# -----------------------------------------------------------------------
lot3_productC = LotObj.create({'name': 'Lot 3', 'product_id': self.productC.id})
lot4_productC = LotObj.create({'name': 'Lot 4', 'product_id': self.productC.id})
lot5_productC = LotObj.create({'name': 'Lot 5', 'product_id': self.productC.id})
lot6_productC = LotObj.create({'name': 'Lot 6', 'product_id': self.productC.id})
lot1_productD = LotObj.create({'name': 'Lot 1', 'product_id': self.productD.id})
LotObj.create({'name': 'Lot 2', 'product_id': self.productD.id})
# Confirm back order of incoming shipment.
back_order_in.action_confirm()
self.assertEqual(back_order_in.state, 'assigned', 'Wrong state of incoming shipment back order: %s instead of %s' % (back_order_in.state, 'assigned'))
for move in back_order_in.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# ----------------------------------------------------------------------
# Replace pack operation (Back order of Incoming shipment)
# ----------------------------------------------------------------------
packD = self.StockPackObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', back_order_in.id)], order='product_qty')
self.assertEqual(len(packD), 1, 'Wrong number of pack operation.')
packD[0].write({
'qty_done': 8,
'lot_id': lot1_productD.id,
})
packCs = self.StockPackObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', back_order_in.id)], limit=1)
packCs.write({
'qty_done': 1,
'lot_id': lot3_productC.id,
})
self.StockPackObj.create({
'product_id': self.productC.id,
'qty_done': 1,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id,
'lot_id': lot4_productC.id,
})
self.StockPackObj.create({
'product_id': self.productC.id,
'qty_done': 2,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id,
'lot_id': lot5_productC.id,
})
self.StockPackObj.create({
'product_id': self.productC.id,
'qty_done': 2,
'product_uom_id': self.productC.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id,
'lot_id': lot6_productC.id,
})
self.StockPackObj.create({
'product_id': self.productA.id,
'qty_done': 10,
'product_uom_id': self.productA.uom_id.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'picking_id': back_order_in.id
})
back_order_in.action_done()
# ----------------------------------------------------------------------
# Check state, quantity and total moves (Back order of Incoming shipment).
# ----------------------------------------------------------------------
# Check total no of move lines.
self.assertEqual(len(back_order_in.move_lines), 3, 'Wrong number of move lines')
# Check incoming shipment state must be 'Done'.
self.assertEqual(back_order_in.state, 'done', 'Wrong state of picking.')
# Check incoming shipment move lines state must be 'Done'.
for move in back_order_in.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move lines.')
# Check product A done quantity must be 10
movesA = self.MoveObj.search([('product_id', '=', self.productA.id), ('picking_id', '=', back_order_in.id)])
self.assertEqual(movesA.product_uom_qty, 10, "Wrong move quantity of product A (%s found instead of 10)" % (movesA.product_uom_qty))
# Check product C done quantity must be 6
movesC = self.MoveObj.search([('product_id', '=', self.productC.id), ('picking_id', '=', back_order_in.id)])
self.assertEqual(movesC.product_uom_qty, 6.0, 'Wrong quantity of moves product C.')
# Check product D done quantity must be 8
movesD = self.MoveObj.search([('product_id', '=', self.productD.id), ('picking_id', '=', back_order_in.id)])
d_done_qty = [move.product_uom_qty for move in movesD]
self.assertEqual(set(d_done_qty), set([8.0]), 'Wrong quantity of moves product D.')
# Check no back order is created.
self.assertFalse(self.PickingObj.search([('backorder_id', '=', back_order_in.id)]), "Should not create any back order.")
# -----------------------------------------------------------------------
# Check stock location quant quantity and quantity available
# of product A, B, C, D
# -----------------------------------------------------------------------
# Check quants and available quantity for product A.
quants = self.StockQuantObj.search([('product_id', '=', self.productA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 12.0, 'Wrong total stock location quantity (%s found instead of 12)' % (sum(total_qty)))
self.assertEqual(self.productA.qty_available, 12.0, 'Wrong quantity available (%s found instead of 12)' % (self.productA.qty_available))
# Check quants and available quantity for product B.
quants = self.StockQuantObj.search([('product_id', '=', self.productB.id), ('location_id', '=', self.stock_location)])
self.assertFalse(quants, 'No quant should found as outgoing shipment took everything out of stock')
self.assertEqual(self.productB.qty_available, 0.0, 'Total quantity in stock should be 0 as the backorder took everything out of stock')
# Check quants and available quantity for product C.
quants = self.StockQuantObj.search([('product_id', '=', self.productC.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 8.0, 'Wrong total stock location quantity (%s found instead of 8)' % (sum(total_qty)))
self.assertEqual(self.productC.qty_available, 8.0, 'Wrong quantity available (%s found instead of 8)' % (self.productC.qty_available))
# Check quants and available quantity for product D.
quants = self.StockQuantObj.search([('product_id', '=', self.productD.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 9.0, 'Wrong total stock location quantity (%s found instead of 9)' % (sum(total_qty)))
self.assertEqual(self.productD.qty_available, 9.0, 'Wrong quantity available (%s found instead of 9)' % (self.productD.qty_available))
# -----------------------------------------------------------------------
# Back order of Outgoing shipment
# ----------------------------------------------------------------------
back_order_out.action_done()
# Check stock location quants and available quantity for product A.
quants = self.StockQuantObj.search([('product_id', '=', self.productA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertGreaterEqual(float_round(sum(total_qty), precision_rounding=0.0001), 1, 'Total stock location quantity for product A should not be negative.')
def test_10_pickings_transfer_with_different_uom(self):
""" Picking transfer with diffrent unit of meassure. """
# ----------------------------------------------------------------------
# Create incoming shipment of products DozA, SDozA, SDozARound, kgB, gB
# ----------------------------------------------------------------------
# DozA ( 10 Dozen ) , SDozA ( 10.5 SuperDozen )
# SDozARound ( 10.5 SuperDozenRound ) , kgB ( 0.020 kg )
# gB ( 525.3 g )
# ----------------------------------------------------------------------
picking_in_A = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.DozA.name,
'product_id': self.DozA.id,
'product_uom_qty': 10,
'product_uom': self.DozA.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.SDozA.name,
'product_id': self.SDozA.id,
'product_uom_qty': 10.5,
'product_uom': self.SDozA.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.SDozARound.name,
'product_id': self.SDozARound.id,
'product_uom_qty': 10.5,
'product_uom': self.SDozARound.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 0.020,
'product_uom': self.kgB.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.gB.name,
'product_id': self.gB.id,
'product_uom_qty': 525.3,
'product_uom': self.gB.uom_id.id,
'picking_id': picking_in_A.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment move lines state.
for move in picking_in_A.move_lines:
self.assertEqual(move.state, 'draft', 'Move state must be draft.')
# Confirm incoming shipment.
picking_in_A.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in_A.move_lines:
self.assertEqual(move.state, 'assigned', 'Move state must be assigned.')
# ----------------------------------------------------
# Check pack operation quantity of incoming shipments.
# ----------------------------------------------------
PackSdozAround = self.StockPackObj.search([('product_id', '=', self.SDozARound.id), ('picking_id', '=', picking_in_A.id)], limit=1)
self.assertEqual(PackSdozAround.product_qty, 11, 'Wrong quantity in pack operation (%s found instead of 11)' % (PackSdozAround.product_qty))
res_dict = picking_in_A.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
# -----------------------------------------------------------------------
# Check stock location quant quantity and quantity available
# -----------------------------------------------------------------------
# Check quants and available quantity for product DozA
quants = self.StockQuantObj.search([('product_id', '=', self.DozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 10, 'Expecting 10 Dozen , got %.4f Dozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.DozA.qty_available, 10, 'Wrong quantity available (%s found instead of 10)' % (self.DozA.qty_available))
# Check quants and available quantity for product SDozA
quants = self.StockQuantObj.search([('product_id', '=', self.SDozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 10.5, 'Expecting 10.5 SDozen , got %.4f SDozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozA.qty_available, 10.5, 'Wrong quantity available (%s found instead of 10.5)' % (self.SDozA.qty_available))
# Check quants and available quantity for product SDozARound
quants = self.StockQuantObj.search([('product_id', '=', self.SDozARound.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 11, 'Expecting 11 SDozenRound , got %.4f SDozenRound on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozARound.qty_available, 11, 'Wrong quantity available (%s found instead of 11)' % (self.SDozARound.qty_available))
# Check quants and available quantity for product gB
quants = self.StockQuantObj.search([('product_id', '=', self.gB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 525.3, 'Expecting 525.3 gram , got %.4f gram on location stock!' % (sum(total_qty)))
self.assertEqual(self.gB.qty_available, 525.3, 'Wrong quantity available (%s found instead of 525.3' % (self.gB.qty_available))
# Check quants and available quantity for product kgB
quants = self.StockQuantObj.search([('product_id', '=', self.kgB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 0.020, 'Expecting 0.020 kg , got %.4f kg on location stock!' % (sum(total_qty)))
self.assertEqual(self.kgB.qty_available, 0.020, 'Wrong quantity available (%s found instead of 0.020)' % (self.kgB.qty_available))
# ----------------------------------------------------------------------
# Create Incoming Shipment B
# ----------------------------------------------------------------------
picking_in_B = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
move_in_a = self.MoveObj.create({
'name': self.DozA.name,
'product_id': self.DozA.id,
'product_uom_qty': 120,
'product_uom': self.uom_unit.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.SDozA.name,
'product_id': self.SDozA.id,
'product_uom_qty': 1512,
'product_uom': self.uom_unit.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.SDozARound.name,
'product_id': self.SDozARound.id,
'product_uom_qty': 1584,
'product_uom': self.uom_unit.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 20.0,
'product_uom': self.uom_gm.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.gB.name,
'product_id': self.gB.id,
'product_uom_qty': 0.525,
'product_uom': self.uom_kg.id,
'picking_id': picking_in_B.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment move lines state.
for move in picking_in_B.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
# Confirm incoming shipment.
picking_in_B.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in_B.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# ----------------------------------------------------------------------
# Check product quantity and unit of measure of pack operation.
# ----------------------------------------------------------------------
# Check pack operation quantity and unit of measure for product DozA.
PackdozA = self.StockPackObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(PackdozA.product_uom_qty, 120, 'Wrong quantity in pack operation (%s found instead of 120)' % (PackdozA.product_uom_qty))
self.assertEqual(PackdozA.product_qty, 10, 'Wrong real quantity in pack operation (%s found instead of 10)' % (PackdozA.product_qty))
self.assertEqual(PackdozA.product_uom_id.id, self.uom_unit.id, 'Wrong uom in pack operation for product DozA.')
# Check pack operation quantity and unit of measure for product SDozA.
PackSdozA = self.StockPackObj.search([('product_id', '=', self.SDozA.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(PackSdozA.product_uom_qty, 1512, 'Wrong quantity in pack operation (%s found instead of 1512)' % (PackSdozA.product_uom_qty))
self.assertEqual(PackSdozA.product_uom_id.id, self.uom_unit.id, 'Wrong uom in pack operation for product SDozA.')
# Check pack operation quantity and unit of measure for product SDozARound.
PackSdozAround = self.StockPackObj.search([('product_id', '=', self.SDozARound.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(PackSdozAround.product_uom_qty, 1584, 'Wrong quantity in pack operation (%s found instead of 1584)' % (PackSdozAround.product_uom_qty))
self.assertEqual(PackSdozAround.product_uom_id.id, self.uom_unit.id, 'Wrong uom in pack operation for product SDozARound.')
# Check pack operation quantity and unit of measure for product gB.
packgB = self.StockPackObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(packgB.product_uom_qty, 0.525, 'Wrong quantity in pack operation (%s found instead of 0.525)' % (packgB.product_uom_qty))
self.assertEqual(packgB.product_qty, 525, 'Wrong real quantity in pack operation (%s found instead of 525)' % (packgB.product_qty))
self.assertEqual(packgB.product_uom_id.id, packgB.move_id.product_uom.id, 'Wrong uom in pack operation for product gB.')
# Check pack operation quantity and unit of measure for product kgB.
packkgB = self.StockPackObj.search([('product_id', '=', self.kgB.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(packkgB.product_uom_qty, 20.0, 'Wrong quantity in pack operation (%s found instead of 20)' % (packkgB.product_uom_qty))
self.assertEqual(packkgB.product_uom_id.id, self.uom_gm.id, 'Wrong uom in pack operation for product kgB')
# ----------------------------------------------------------------------
# Replace pack operation of incoming shipment.
# ----------------------------------------------------------------------
self.StockPackObj.search([('product_id', '=', self.kgB.id), ('picking_id', '=', picking_in_B.id)]).write({
'product_uom_qty': 0.020, 'product_uom_id': self.uom_kg.id})
self.StockPackObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_in_B.id)]).write({
'product_uom_qty': 526, 'product_uom_id': self.uom_gm.id})
self.StockPackObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', picking_in_B.id)]).write({
'product_uom_qty': 4, 'product_uom_id': self.uom_dozen.id})
self.StockPackObj.create({
'product_id': self.DozA.id,
'product_uom_qty': 48,
'product_uom_id': self.uom_unit.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'move_id': move_in_a.id
})
# -----------------
# Transfer product.
# -----------------
res_dict = picking_in_B.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
res_dict_for_back_order = wizard.process()
backorder_wizard = self.env[(res_dict_for_back_order.get('res_model'))].browse(res_dict_for_back_order.get('res_id'))
backorder_wizard.process()
# -----------------------------------------------------------------------
# Check incoming shipment
# -----------------------------------------------------------------------
# Check incoming shipment state.
self.assertEqual(picking_in_B.state, 'done', 'Incoming shipment state should be done.')
# Check incoming shipment move lines state.
for move in picking_in_B.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
# Check total done move lines for incoming shipment.
self.assertEqual(len(picking_in_B.move_lines), 5, 'Wrong number of move lines')
# Check product DozA done quantity.
moves_DozA = self.MoveObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(moves_DozA.product_uom_qty, 96, 'Wrong move quantity (%s found instead of 96)' % (moves_DozA.product_uom_qty))
self.assertEqual(moves_DozA.product_uom.id, self.uom_unit.id, 'Wrong uom in move for product DozA.')
# Check product SDozA done quantity.
moves_SDozA = self.MoveObj.search([('product_id', '=', self.SDozA.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(moves_SDozA.product_uom_qty, 1512, 'Wrong move quantity (%s found instead of 1512)' % (moves_SDozA.product_uom_qty))
self.assertEqual(moves_SDozA.product_uom.id, self.uom_unit.id, 'Wrong uom in move for product SDozA.')
# Check product SDozARound done quantity.
moves_SDozARound = self.MoveObj.search([('product_id', '=', self.SDozARound.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(moves_SDozARound.product_uom_qty, 1584, 'Wrong move quantity (%s found instead of 1584)' % (moves_SDozARound.product_uom_qty))
self.assertEqual(moves_SDozARound.product_uom.id, self.uom_unit.id, 'Wrong uom in move for product SDozARound.')
# Check product kgB done quantity.
moves_kgB = self.MoveObj.search([('product_id', '=', self.kgB.id), ('picking_id', '=', picking_in_B.id)], limit=1)
self.assertEqual(moves_kgB.product_uom_qty, 20, 'Wrong quantity in move (%s found instead of 20)' % (moves_kgB.product_uom_qty))
self.assertEqual(moves_kgB.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product kgB.')
# Check the move created for product gB in kg with quantity 0.526 kg
moves_gB_kg = self.MoveObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_in_B.id), ('product_uom', '=', self.uom_kg.id)], limit=1)
self.assertEqual(moves_gB_kg.product_uom_qty, 0.526, 'Wrong move quantity (%s found instead of 0.526)' % (moves_gB_kg.product_uom_qty))
self.assertEqual(moves_gB_kg.product_uom.id, self.uom_kg.id, 'Wrong uom in move for product gB.')
# TODO Test extra move once the uom is editable in the move_lines
# ----------------------------------------------------------------------
# Check Back order of Incoming shipment.
# ----------------------------------------------------------------------
# Check back order created or not.
bo_in_B = self.PickingObj.search([('backorder_id', '=', picking_in_B.id)])
self.assertEqual(len(bo_in_B), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_in_B.move_lines), 1, 'Wrong number of move lines')
# Check back order created with correct quantity and uom or not.
moves_DozA = self.MoveObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', bo_in_B.id)], limit=1)
self.assertEqual(moves_DozA.product_uom_qty, 24.0, 'Wrong move quantity (%s found instead of 24.0)' % (moves_DozA.product_uom_qty))
self.assertEqual(moves_DozA.product_uom.id, self.uom_unit.id, 'Wrong uom in move for product DozA.')
# ----------------------------------------------------------------------
# Check product stock location quantity and quantity available.
# ----------------------------------------------------------------------
# Check quants and available quantity for product DozA
quants = self.StockQuantObj.search([('product_id', '=', self.DozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 18, 'Expecting 18 Dozen , got %.4f Dozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.DozA.qty_available, 18, 'Wrong quantity available (%s found instead of 18)' % (self.DozA.qty_available))
# Check quants and available quantity for product SDozA
quants = self.StockQuantObj.search([('product_id', '=', self.SDozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 21, 'Expecting 21 SDozen , got %.4f SDozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozA.qty_available, 21, 'Wrong quantity available (%s found instead of 21)' % (self.SDozA.qty_available))
# Check quants and available quantity for product SDozARound
quants = self.StockQuantObj.search([('product_id', '=', self.SDozARound.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 22, 'Expecting 22 SDozenRound , got %.4f SDozenRound on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozARound.qty_available, 22, 'Wrong quantity available (%s found instead of 22)' % (self.SDozARound.qty_available))
# Check quants and available quantity for product gB.
quants = self.StockQuantObj.search([('product_id', '=', self.gB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(round(sum(total_qty), 1), 1051.3, 'Expecting 1051.3 Gram , got %.4f Gram on location stock!' % (sum(total_qty)))
self.assertEqual(round(self.gB.qty_available, 1), 1051.3, 'Wrong quantity available (%s found instead of 1051.3)' % (self.gB.qty_available))
# Check quants and available quantity for product kgB.
quants = self.StockQuantObj.search([('product_id', '=', self.kgB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 0.040, 'Expecting 0.040 kg , got %.4f kg on location stock!' % (sum(total_qty)))
self.assertEqual(self.kgB.qty_available, 0.040, 'Wrong quantity available (%s found instead of 0.040)' % (self.kgB.qty_available))
# ----------------------------------------------------------------------
# Create outgoing shipment.
# ----------------------------------------------------------------------
before_out_quantity = self.kgB.qty_available
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 0.966,
'product_uom': self.uom_gm.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 0.034,
'product_uom': self.uom_gm.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.action_confirm()
picking_out.action_assign()
res_dict = picking_out.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
# Check quantity difference after stock transfer.
quantity_diff = before_out_quantity - self.kgB.qty_available
self.assertEqual(float_round(quantity_diff, precision_rounding=0.0001), 0.001, 'Wrong quantity difference.')
self.assertEqual(self.kgB.qty_available, 0.039, 'Wrong quantity available (%s found instead of 0.039)' % (self.kgB.qty_available))
# ======================================================================
# Outgoing shipments.
# ======================================================================
# Create Outgoing shipment with ...
# product DozA ( 54 Unit ) , SDozA ( 288 Unit )
# product SDozARound ( 361 unit ) , product gB ( 0.503 kg )
# product kgB ( 20 g )
# ======================================================================
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.DozA.name,
'product_id': self.DozA.id,
'product_uom_qty': 54,
'product_uom': self.uom_unit.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.SDozA.name,
'product_id': self.SDozA.id,
'product_uom_qty': 288,
'product_uom': self.uom_unit.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.SDozARound.name,
'product_id': self.SDozARound.id,
'product_uom_qty': 361,
'product_uom': self.uom_unit.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.gB.name,
'product_id': self.gB.id,
'product_uom_qty': 0.503,
'product_uom': self.uom_kg.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.kgB.name,
'product_id': self.kgB.id,
'product_uom_qty': 20,
'product_uom': self.uom_gm.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
# Confirm outgoing shipment.
picking_out.action_confirm()
for move in picking_out.move_lines:
self.assertEqual(move.state, 'confirmed', 'Wrong state of move line.')
# Assign products to the outgoing shipment
picking_out.action_assign()
for move in picking_out.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# Check product DozA available quantity
DozA_qty = self.MoveObj.search([('product_id', '=', self.DozA.id), ('picking_id', '=', picking_out.id)], limit=1).product_qty
self.assertEqual(DozA_qty, 4.5, 'Wrong move quantity availability (%s found instead of 4.5)' % (DozA_qty))
# Check product SDozA available quantity
SDozA_qty = self.MoveObj.search([('product_id', '=', self.SDozA.id), ('picking_id', '=', picking_out.id)], limit=1).product_qty
self.assertEqual(SDozA_qty, 2, 'Wrong move quantity availability (%s found instead of 2)' % (SDozA_qty))
# Check product SDozARound available quantity
SDozARound_qty = self.MoveObj.search([('product_id', '=', self.SDozARound.id), ('picking_id', '=', picking_out.id)], limit=1).product_qty
self.assertEqual(SDozARound_qty, 3, 'Wrong move quantity availability (%s found instead of 3)' % (SDozARound_qty))
# Check product gB available quantity
gB_qty = self.MoveObj.search([('product_id', '=', self.gB.id), ('picking_id', '=', picking_out.id)], limit=1).product_qty
self.assertEqual(gB_qty, 503, 'Wrong move quantity availability (%s found instead of 503)' % (gB_qty))
# Check product kgB available quantity
kgB_qty = self.MoveObj.search([('product_id', '=', self.kgB.id), ('picking_id', '=', picking_out.id)], limit=1).product_qty
self.assertEqual(kgB_qty, 0.020, 'Wrong move quantity availability (%s found instead of 0.020)' % (kgB_qty))
res_dict = picking_out.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
# ----------------------------------------------------------------------
# Check product stock location quantity and quantity available.
# ----------------------------------------------------------------------
# Check quants and available quantity for product DozA
quants = self.StockQuantObj.search([('product_id', '=', self.DozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 13.5, 'Expecting 13.5 Dozen , got %.4f Dozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.DozA.qty_available, 13.5, 'Wrong quantity available (%s found instead of 13.5)' % (self.DozA.qty_available))
# Check quants and available quantity for product SDozA
quants = self.StockQuantObj.search([('product_id', '=', self.SDozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 19, 'Expecting 19 SDozen , got %.4f SDozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozA.qty_available, 19, 'Wrong quantity available (%s found instead of 19)' % (self.SDozA.qty_available))
# Check quants and available quantity for product SDozARound
quants = self.StockQuantObj.search([('product_id', '=', self.SDozARound.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 19, 'Expecting 19 SDozRound , got %.4f SDozRound on location stock!' % (sum(total_qty)))
self.assertEqual(self.SDozARound.qty_available, 19, 'Wrong quantity available (%s found instead of 19)' % (self.SDozARound.qty_available))
# Check quants and available quantity for product gB.
quants = self.StockQuantObj.search([('product_id', '=', self.gB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(round(sum(total_qty), 1), 548.3, 'Expecting 548.3 g , got %.4f g on location stock!' % (sum(total_qty)))
self.assertEqual(round(self.gB.qty_available, 1), 548.3, 'Wrong quantity available (%s found instead of 548.3)' % (self.gB.qty_available))
# Check quants and available quantity for product kgB.
quants = self.StockQuantObj.search([('product_id', '=', self.kgB.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 0.019, 'Expecting 0.019 kg , got %.4f kg on location stock!' % (sum(total_qty)))
self.assertEqual(self.kgB.qty_available, 0.019, 'Wrong quantity available (%s found instead of 0.019)' % (self.kgB.qty_available))
# ----------------------------------------------------------------------
# Receipt back order of incoming shipment.
# ----------------------------------------------------------------------
res_dict = bo_in_B.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
# Check quants and available quantity for product DozA.
quants = self.StockQuantObj.search([('product_id', '=', self.DozA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
self.assertEqual(sum(total_qty), 15.5, 'Expecting 15.5 Dozen , got %.4f Dozen on location stock!' % (sum(total_qty)))
self.assertEqual(self.DozA.qty_available, 15.5, 'Wrong quantity available (%s found instead of 15.5)' % (self.DozA.qty_available))
# -----------------------------------------
# Create product in kg and receive in ton.
# -----------------------------------------
productKG = self.ProductObj.create({'name': 'Product KG', 'uom_id': self.uom_kg.id, 'uom_po_id': self.uom_kg.id, 'type': 'product'})
picking_in = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': productKG.name,
'product_id': productKG.id,
'product_uom_qty': 1.0,
'product_uom': self.uom_tone.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment state.
self.assertEqual(picking_in.state, 'draft', 'Incoming shipment state should be draft.')
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
# Confirm incoming shipment.
picking_in.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# Check pack operation quantity.
packKG = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', picking_in.id)], limit=1)
self.assertEqual(packKG.product_qty, 1000, 'Wrong product real quantity in pack operation (%s found instead of 1000)' % (packKG.product_qty))
self.assertEqual(packKG.product_uom_qty, 1, 'Wrong product quantity in pack operation (%s found instead of 1)' % (packKG.product_uom_qty))
self.assertEqual(packKG.product_uom_id.id, self.uom_tone.id, 'Wrong product uom in pack operation.')
# Transfer Incoming shipment.
res_dict = picking_in.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
# -----------------------------------------------------------------------
# Check incoming shipment after transfer.
# -----------------------------------------------------------------------
# Check incoming shipment state.
self.assertEqual(picking_in.state, 'done', 'Incoming shipment state: %s instead of %s' % (picking_in.state, 'done'))
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move lines.')
# Check total done move lines for incoming shipment.
self.assertEqual(len(picking_in.move_lines), 1, 'Wrong number of move lines')
# Check productKG done quantity.
move = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', picking_in.id)], limit=1)
self.assertEqual(move.product_uom_qty, 1, 'Wrong product quantity in done move.')
self.assertEqual(move.product_uom.id, self.uom_tone.id, 'Wrong unit of measure in done move.')
self.assertEqual(productKG.qty_available, 1000, 'Wrong quantity available of product (%s found instead of 1000)' % (productKG.qty_available))
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': productKG.name,
'product_id': productKG.id,
'product_uom_qty': 2.5,
'product_uom': self.uom_gm.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.action_confirm()
picking_out.action_assign()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', picking_out.id)], limit=1)
pack_opt.write({'product_uom_qty': 0.5})
res_dict = picking_out.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
res_dict_for_back_order = wizard.process()
backorder_wizard = self.env[(res_dict_for_back_order.get('res_model'))].browse(res_dict_for_back_order.get('res_id'))
backorder_wizard.process()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
# Check total quantity stock location.
        self.assertEqual(sum(total_qty), 999.9995, 'Expecting 999.9995 kg, got %.4f kg on location stock!' % (sum(total_qty)))
# ---------------------------------
# Check Back order created or not.
# ---------------------------------
bo_out_1 = self.PickingObj.search([('backorder_id', '=', picking_out.id)])
self.assertEqual(len(bo_out_1), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_out_1.move_lines), 1, 'Wrong number of move lines')
moves_KG = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_1.id)], limit=1)
# Check back order created with correct quantity and uom or not.
self.assertEqual(moves_KG.product_uom_qty, 2.0, 'Wrong move quantity (%s found instead of 2.0)' % (moves_KG.product_uom_qty))
self.assertEqual(moves_KG.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product KG.')
bo_out_1.action_assign()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_1.id)], limit=1)
pack_opt.write({'product_uom_qty': 0.5})
res_dict = bo_out_1.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
res_dict_for_back_order = wizard.process()
backorder_wizard = self.env[(res_dict_for_back_order.get('res_model'))].browse(res_dict_for_back_order.get('res_id'))
backorder_wizard.process()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
# Check total quantity stock location.
        self.assertEqual(sum(total_qty), 999.9990, 'Expecting 999.9990 kg, got %.4f kg on location stock!' % (sum(total_qty)))
# Check Back order created or not.
# ---------------------------------
bo_out_2 = self.PickingObj.search([('backorder_id', '=', bo_out_1.id)])
self.assertEqual(len(bo_out_2), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_out_2.move_lines), 1, 'Wrong number of move lines')
# Check back order created with correct move quantity and uom or not.
moves_KG = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_2.id)], limit=1)
self.assertEqual(moves_KG.product_uom_qty, 1.5, 'Wrong move quantity (%s found instead of 1.5)' % (moves_KG.product_uom_qty))
self.assertEqual(moves_KG.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product KG.')
bo_out_2.action_assign()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_2.id)], limit=1)
pack_opt.write({'product_uom_qty': 0.5})
res_dict = bo_out_2.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
res_dict_for_back_order = wizard.process()
backorder_wizard = self.env[(res_dict_for_back_order.get('res_model'))].browse(res_dict_for_back_order.get('res_id'))
backorder_wizard.process()
# Check total quantity stock location of product KG.
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
        self.assertEqual(sum(total_qty), 999.9985, 'Expecting 999.9985 kg, got %.4f kg on location stock!' % (sum(total_qty)))
# Check Back order created or not.
# ---------------------------------
bo_out_3 = self.PickingObj.search([('backorder_id', '=', bo_out_2.id)])
self.assertEqual(len(bo_out_3), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_out_3.move_lines), 1, 'Wrong number of move lines')
# Check back order created with correct quantity and uom or not.
moves_KG = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_3.id)], limit=1)
self.assertEqual(moves_KG.product_uom_qty, 1, 'Wrong move quantity (%s found instead of 1.0)' % (moves_KG.product_uom_qty))
self.assertEqual(moves_KG.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product KG.')
bo_out_3.action_assign()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_3.id)], limit=1)
pack_opt.write({'product_uom_qty': 0.5})
res_dict = bo_out_3.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
res_dict_for_back_order = wizard.process()
backorder_wizard = self.env[(res_dict_for_back_order.get('res_model'))].browse(res_dict_for_back_order.get('res_id'))
backorder_wizard.process()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
        self.assertEqual(sum(total_qty), 999.9980, 'Expecting 999.9980 kg, got %.4f kg on location stock!' % (sum(total_qty)))
# Check Back order created or not.
# ---------------------------------
bo_out_4 = self.PickingObj.search([('backorder_id', '=', bo_out_3.id)])
self.assertEqual(len(bo_out_4), 1, 'Back order should be created.')
# Check total move lines of back order.
self.assertEqual(len(bo_out_4.move_lines), 1, 'Wrong number of move lines')
# Check back order created with correct quantity and uom or not.
moves_KG = self.MoveObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_4.id)], limit=1)
self.assertEqual(moves_KG.product_uom_qty, 0.5, 'Wrong move quantity (%s found instead of 0.5)' % (moves_KG.product_uom_qty))
self.assertEqual(moves_KG.product_uom.id, self.uom_gm.id, 'Wrong uom in move for product KG.')
bo_out_4.action_assign()
pack_opt = self.StockPackObj.search([('product_id', '=', productKG.id), ('picking_id', '=', bo_out_4.id)], limit=1)
pack_opt.write({'product_uom_qty': 0.5})
res_dict = bo_out_4.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
        self.assertEqual(sum(total_qty), 999.9975, 'Expecting 999.9975 kg, got %.4f kg on location stock!' % (sum(total_qty)))
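    # Informal recap (added for clarity, not part of the original test): the
    # receipt above brought in 1 tonne = 1000 kg, while the delivery chain
    # ships 2.5 g in five validations of 0.5 g each. Since 0.5 g = 0.0005 kg,
    # the on-hand quantity walks 1000 -> 999.9995 -> 999.9990 -> 999.9985
    # -> 999.9980 -> 999.9975 kg, exactly the sequence asserted above.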
def test_20_create_inventory_with_different_uom(self):
"""Create inventory with different unit of measure."""
# ------------------------------------------------
# Test inventory with product A(Unit).
# ------------------------------------------------
inventory = self.InvObj.create({'name': 'Test',
'product_id': self.UnitA.id,
'filter': 'product'})
inventory.action_start()
        self.assertFalse(inventory.line_ids, "Inventory line should not be created.")
inventory_line = self.InvLineObj.create({
'inventory_id': inventory.id,
'product_id': self.UnitA.id,
'product_uom_id': self.uom_dozen.id,
'product_qty': 10,
'location_id': self.stock_location})
inventory.action_done()
# Check quantity available of product UnitA.
quants = self.StockQuantObj.search([('product_id', '=', self.UnitA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
        self.assertEqual(sum(total_qty), 120, 'Expecting 120 Units, got %.4f Units on location stock!' % (sum(total_qty)))
        self.assertEqual(self.UnitA.qty_available, 120, 'Expecting 120 Units, got %.4f Units of quantity available!' % (self.UnitA.qty_available))
# Create Inventory again for product UnitA.
inventory = self.InvObj.create({'name': 'Test',
'product_id': self.UnitA.id,
'filter': 'product'})
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1, "One inventory line should be created.")
inventory_line = self.InvLineObj.search([('product_id', '=', self.UnitA.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(inventory_line.product_qty, 120, "Wrong product quantity in inventory line.")
        # Modify the inventory line and set the quantity to 144 units on this new inventory.
inventory_line.write({'product_qty': 144})
inventory.action_done()
move = self.MoveObj.search([('product_id', '=', self.UnitA.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(move.product_uom_qty, 24, "Wrong move quantity of product UnitA.")
# Check quantity available of product UnitA.
quants = self.StockQuantObj.search([('product_id', '=', self.UnitA.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
        self.assertEqual(sum(total_qty), 144, 'Expecting 144 Units, got %.4f Units on location stock!' % (sum(total_qty)))
        self.UnitA._compute_quantities()
        self.assertEqual(self.UnitA.qty_available, 144, 'Expecting 144 Units, got %.4f Units of quantity available!' % (self.UnitA.qty_available))
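        # Note (added for clarity): the first inventory line was entered in
        # dozens (10 dozen = 120 units); the second inventory counts 144
        # units, so the adjustment move checked above is 144 - 120 = 24 units.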
# ------------------------------------------------
# Test inventory with product KG.
# ------------------------------------------------
productKG = self.ProductObj.create({'name': 'Product KG', 'uom_id': self.uom_kg.id, 'uom_po_id': self.uom_kg.id, 'type': 'product'})
inventory = self.InvObj.create({'name': 'Inventory Product KG',
'product_id': productKG.id,
'filter': 'product'})
inventory.action_start()
        self.assertFalse(inventory.line_ids, "Inventory line should not be created.")
inventory_line = self.InvLineObj.create({
'inventory_id': inventory.id,
'product_id': productKG.id,
'product_uom_id': self.uom_tone.id,
'product_qty': 5,
'location_id': self.stock_location})
inventory.action_done()
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
        self.assertEqual(sum(total_qty), 5000, 'Expecting 5000 kg, got %.4f kg on location stock!' % (sum(total_qty)))
        self.assertEqual(productKG.qty_available, 5000, 'Expecting 5000 kg, got %.4f kg of quantity available!' % (productKG.qty_available))
# Create Inventory again.
inventory = self.InvObj.create({'name': 'Test',
'product_id': productKG.id,
'filter': 'product'})
inventory.action_start()
self.assertEqual(len(inventory.line_ids), 1, "One inventory line should be created.")
inventory_line = self.InvLineObj.search([('product_id', '=', productKG.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(inventory_line.product_qty, 5000, "Wrong product quantity in inventory line.")
        # Modify the inventory line and set the quantity to 4000 kg on this new inventory.
inventory_line.write({'product_qty': 4000})
inventory.action_done()
# Check inventory move quantity of product KG.
move = self.MoveObj.search([('product_id', '=', productKG.id), ('inventory_id', '=', inventory.id)], limit=1)
self.assertEqual(move.product_uom_qty, 1000, "Wrong move quantity of product KG.")
# Check quantity available of product KG.
quants = self.StockQuantObj.search([('product_id', '=', productKG.id), ('location_id', '=', self.stock_location)])
total_qty = [quant.quantity for quant in quants]
        self.assertEqual(sum(total_qty), 4000, 'Expecting 4000 kg, got %.4f kg on location stock!' % (sum(total_qty)))
        productKG._compute_quantities()
        self.assertEqual(productKG.qty_available, 4000, 'Expecting 4000 kg, got %.4f kg of quantity available!' % (productKG.qty_available))
# --------------------------------------------------------
# TEST PARTIAL INVENTORY WITH PACKS and LOTS
# ---------------------------------------------------------
packproduct = self.ProductObj.create({'name': 'Pack Product', 'uom_id': self.uom_unit.id, 'uom_po_id': self.uom_unit.id, 'type': 'product'})
lotproduct = self.ProductObj.create({'name': 'Lot Product', 'uom_id': self.uom_unit.id, 'uom_po_id': self.uom_unit.id, 'type': 'product'})
inventory = self.InvObj.create({'name': 'Test Partial and Pack',
'filter': 'partial',
'location_id': self.stock_location})
inventory.action_start()
pack_obj = self.env['stock.quant.package']
lot_obj = self.env['stock.production.lot']
pack1 = pack_obj.create({'name': 'PACK00TEST1'})
pack_obj.create({'name': 'PACK00TEST2'})
lot1 = lot_obj.create({'name': 'Lot001', 'product_id': lotproduct.id})
move = self.MoveObj.search([('product_id', '=', productKG.id), ('inventory_id', '=', inventory.id)], limit=1)
        self.assertEqual(len(move), 0, "Partial filter should not create lines upon prepare")
line_vals = []
line_vals += [{'location_id': self.stock_location, 'product_id': packproduct.id, 'product_qty': 10, 'product_uom_id': packproduct.uom_id.id}]
line_vals += [{'location_id': self.stock_location, 'product_id': packproduct.id, 'product_qty': 20, 'product_uom_id': packproduct.uom_id.id, 'package_id': pack1.id}]
line_vals += [{'location_id': self.stock_location, 'product_id': lotproduct.id, 'product_qty': 30, 'product_uom_id': lotproduct.uom_id.id, 'prod_lot_id': lot1.id}]
line_vals += [{'location_id': self.stock_location, 'product_id': lotproduct.id, 'product_qty': 25, 'product_uom_id': lotproduct.uom_id.id, 'prod_lot_id': False}]
inventory.write({'line_ids': [(0, 0, x) for x in line_vals]})
inventory.action_done()
self.assertEqual(packproduct.qty_available, 30, "Wrong qty available for packproduct")
self.assertEqual(lotproduct.qty_available, 55, "Wrong qty available for lotproduct")
quants = self.StockQuantObj.search([('product_id', '=', packproduct.id), ('location_id', '=', self.stock_location), ('package_id', '=', pack1.id)])
total_qty = sum([quant.quantity for quant in quants])
self.assertEqual(total_qty, 20, 'Expecting 20 units on package 1 of packproduct, but we got %.4f on location stock!' % (total_qty))
        # Create an inventory that sets the un-lotted quantity to 0 and check that counting without a pack does not take quantity from the pack
inventory2 = self.InvObj.create({'name': 'Test Partial Lot and Pack2',
'filter': 'partial',
'location_id': self.stock_location})
inventory2.action_start()
line_vals = []
line_vals += [{'location_id': self.stock_location, 'product_id': packproduct.id, 'product_qty': 20, 'product_uom_id': packproduct.uom_id.id}]
line_vals += [{'location_id': self.stock_location, 'product_id': lotproduct.id, 'product_qty': 0, 'product_uom_id': lotproduct.uom_id.id, 'prod_lot_id': False}]
line_vals += [{'location_id': self.stock_location, 'product_id': lotproduct.id, 'product_qty': 10, 'product_uom_id': lotproduct.uom_id.id, 'prod_lot_id': lot1.id}]
inventory2.write({'line_ids': [(0, 0, x) for x in line_vals]})
inventory2.action_done()
self.assertEqual(packproduct.qty_available, 40, "Wrong qty available for packproduct")
self.assertEqual(lotproduct.qty_available, 10, "Wrong qty available for lotproduct")
quants = self.StockQuantObj.search([('product_id', '=', lotproduct.id), ('location_id', '=', self.stock_location), ('lot_id', '=', lot1.id)])
total_qty = sum([quant.quantity for quant in quants])
        self.assertEqual(total_qty, 10, 'Expecting 10 units of lot1 for lotproduct, but we got %.4f on location stock!' % (total_qty))
quants = self.StockQuantObj.search([('product_id', '=', lotproduct.id), ('location_id', '=', self.stock_location), ('lot_id', '=', False)])
total_qty = sum([quant.quantity for quant in quants])
        self.assertEqual(total_qty, 0, 'Expecting 0 units without lot for lotproduct, but we got %.4f on location stock!' % (total_qty))
        # check the products available for the saleable category in the stock location
category_id = self.ref('product.product_category_5')
inventory3 = self.InvObj.create({
'name': 'Test Category',
'filter': 'category',
'location_id': self.stock_location,
'category_id': category_id
})
# Start Inventory
inventory3.action_start()
# check all products have given category id
products_category = inventory3.line_ids.mapped('product_id.categ_id')
self.assertEqual(len(products_category), 1, "Inventory line should have only one category")
inventory3.action_done()
        # check the category filter with exhausted products in the stock location
inventory4 = self.InvObj.create({
'name': 'Test Exhausted Product',
'filter': 'category',
'location_id': self.stock_location,
'category_id': category_id,
'exhausted': True,
})
inventory4.action_start()
inventory4._get_inventory_lines_values()
inventory4_lines_count = len(inventory4.line_ids)
inventory4.action_done()
# Add one product in this product category
product = self.ProductObj.create({'name': 'Product A', 'type': 'product', 'categ_id': category_id})
# Check that this exhausted product is in the product category inventory adjustment
inventory5 = self.InvObj.create({
'name': 'Test Exhausted Product',
'filter': 'category',
'location_id': self.stock_location,
'category_id': category_id,
'exhausted': True,
})
inventory5.action_start()
inventory5._get_inventory_lines_values()
inventory5_lines_count = len(inventory5.line_ids)
inventory5.action_done()
self.assertEqual(inventory5_lines_count, inventory4_lines_count + 1, "The new product is not taken into account in the inventory valuation.")
        self.assertTrue(product.id in inventory5.line_ids.mapped('product_id').ids, "The new product is not taken into account in the inventory valuation.")
def test_30_check_with_no_incoming_lot(self):
""" Picking in without lots and picking out with"""
# Change basic operation type not to get lots
# Create product with lot tracking
        picking_type_in = self.env['stock.picking.type'].browse(self.picking_type_in)
        picking_type_in.use_create_lots = False
self.productA.tracking = 'lot'
picking_in = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 4,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_in.id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
# Confirm incoming shipment.
picking_in.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
res_dict = picking_in.button_validate()
wizard = self.env[(res_dict.get('res_model'))].browse(res_dict.get('res_id'))
wizard.process()
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'name': 'testpicking',
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
move_out = self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 3,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.action_confirm()
picking_out.action_assign()
pack_opt = self.StockPackObj.search([('picking_id', '=', picking_out.id)], limit=1)
lot1 = self.LotObj.create({'product_id': self.productA.id, 'name': 'LOT1'})
lot2 = self.LotObj.create({'product_id': self.productA.id, 'name': 'LOT2'})
lot3 = self.LotObj.create({'product_id': self.productA.id, 'name': 'LOT3'})
pack_opt.write({'lot_id': lot1.id, 'qty_done': 1.0})
self.StockPackObj.create({'product_id': self.productA.id, 'move_id': move_out.id, 'product_uom_id': move_out.product_uom.id, 'lot_id': lot2.id, 'qty_done': 1.0, 'location_id': self.stock_location, 'location_dest_id': self.customer_location})
self.StockPackObj.create({'product_id': self.productA.id, 'move_id': move_out.id, 'product_uom_id': move_out.product_uom.id, 'lot_id': lot3.id, 'qty_done': 2.0, 'location_id': self.stock_location, 'location_dest_id': self.customer_location})
picking_out.action_done()
quants = self.StockQuantObj.search([('product_id', '=', self.productA.id), ('location_id', '=', self.stock_location)])
# TODO wait sle fix
# self.assertFalse(quants, 'Should not have any quants in stock anymore')
def test_40_pack_in_pack(self):
""" Put a pack in pack"""
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.pack_location,
'location_dest_id': self.customer_location})
move_out = self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 3,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.pack_location,
'location_dest_id': self.customer_location})
picking_pack = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.pack_location})
move_pack = self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 3,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_pack.id,
'location_id': self.stock_location,
'location_dest_id': self.pack_location,
'move_dest_ids': [(4, move_out.id, 0)]})
picking_in = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
move_in = self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 3,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location,
'move_dest_ids': [(4, move_pack.id, 0)]})
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
# Confirm incoming shipment.
picking_in.action_confirm()
# Check incoming shipment move lines state.
for move in picking_in.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
        # Check pack picking move lines state.
for move in picking_pack.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
        # Confirm pack picking.
picking_pack.action_confirm()
        # Check pack picking move lines state.
for move in picking_pack.move_lines:
self.assertEqual(move.state, 'waiting', 'Wrong state of move line.')
        # Check outgoing shipment move lines state.
for move in picking_out.move_lines:
self.assertEqual(move.state, 'draft', 'Wrong state of move line.')
        # Confirm outgoing shipment.
picking_out.action_confirm()
        # Check outgoing shipment move lines state.
for move in picking_out.move_lines:
self.assertEqual(move.state, 'waiting', 'Wrong state of move line.')
# Set the quantity done on the pack operation
move_in.move_line_ids.qty_done = 3.0
# Put in a pack
picking_in.put_in_pack()
# Get the new package
picking_in_package = move_in.move_line_ids.result_package_id
# Validate picking
picking_in.action_done()
# Check first picking state changed to done
for move in picking_in.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
# Check next picking state changed to 'assigned'
for move in picking_pack.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# Set the quantity done on the pack operation
move_pack.move_line_ids.qty_done = 3.0
# Get the new package
picking_pack_package = move_pack.move_line_ids.result_package_id
# Validate picking
picking_pack.action_done()
# Check second picking state changed to done
for move in picking_pack.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
# Check next picking state changed to 'assigned'
for move in picking_out.move_lines:
self.assertEqual(move.state, 'assigned', 'Wrong state of move line.')
# Validate picking
picking_out.move_line_ids.qty_done = 3.0
picking_out_package = move_out.move_line_ids.result_package_id
picking_out.action_done()
# check all pickings are done
for move in picking_in.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
for move in picking_pack.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
for move in picking_out.move_lines:
self.assertEqual(move.state, 'done', 'Wrong state of move line.')
# Check picking_in_package is in picking_pack_package
self.assertEqual(picking_in_package.id, picking_pack_package.id, 'The package created in the picking in is not in the one created in picking pack')
        self.assertEqual(picking_pack_package.id, picking_out_package.id, 'The package used in the pack picking is not the one used in the picking out')
# Check that we have one quant in customer location.
quant = self.StockQuantObj.search([('product_id', '=', self.productA.id), ('location_id', '=', self.customer_location)])
self.assertEqual(len(quant), 1, 'There should be one quant with package for customer location')
# Check that the parent package of the quant is the picking_in_package
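        # Sketch of that missing check (left commented out; the field name is
        # assumed from the quant model used earlier in this test):
        # self.assertEqual(quant.package_id.id, picking_in_package.id,
        #                  'The quant should be in the package built in picking_in')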
def test_50_create_in_out_with_product_pack_lines(self):
picking_in = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.productE.name,
'product_id': self.productE.id,
'product_uom_qty': 10,
'product_uom': self.productE.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
picking_in.action_confirm()
pack_obj = self.env['stock.quant.package']
pack1 = pack_obj.create({'name': 'PACKINOUTTEST1'})
pack2 = pack_obj.create({'name': 'PACKINOUTTEST2'})
picking_in.move_line_ids[0].result_package_id = pack1
picking_in.move_line_ids[0].qty_done = 4
packop2 = picking_in.move_line_ids[0].with_context(bypass_reservation_update=True).copy({'product_uom_qty': 0})
packop2.qty_done = 6
packop2.result_package_id = pack2
picking_in.action_done()
quants = self.env['stock.quant']._gather(self.productE, self.env['stock.location'].browse(self.stock_location))
self.assertEqual(sum([x.quantity for x in quants]), 10.0, 'Expecting 10 pieces in stock')
# Check the quants are in the package
self.assertEqual(sum(x.quantity for x in pack1.quant_ids), 4.0, 'Pack 1 should have 4 pieces')
self.assertEqual(sum(x.quantity for x in pack2.quant_ids), 6.0, 'Pack 2 should have 6 pieces')
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.productE.name,
'product_id': self.productE.id,
'product_uom_qty': 3,
'product_uom': self.productE.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.action_confirm()
picking_out.action_assign()
packout1 = picking_out.move_line_ids[0]
packout2 = picking_out.move_line_ids[0].with_context(bypass_reservation_update=True).copy({'product_uom_qty': 0})
packout1.qty_done = 2
packout1.package_id = pack1
packout2.package_id = pack2
packout2.qty_done = 1
picking_out.action_done()
# Should be only 1 negative quant in supplier location
neg_quants = self.env['stock.quant'].search([('product_id', '=', self.productE.id), ('quantity', '<', 0.0)])
        self.assertEqual(len(neg_quants), 1, 'There should be 1 negative quant for the supplier!')
        self.assertEqual(neg_quants.location_id.id, self.supplier_location, 'The negative quant should be in the supplier location!')
quants = self.env['stock.quant']._gather(self.productE, self.env['stock.location'].browse(self.stock_location))
self.assertEqual(len(quants), 2, 'We should have exactly 2 quants in the end')
def test_60_create_in_out_with_product_pack_lines(self):
picking_in = self.PickingObj.create({
'partner_id': self.partner_delta_id,
'picking_type_id': self.picking_type_in,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
self.MoveObj.create({
'name': self.productE.name,
'product_id': self.productE.id,
'product_uom_qty': 200,
'product_uom': self.productE.uom_id.id,
'picking_id': picking_in.id,
'location_id': self.supplier_location,
'location_dest_id': self.stock_location})
picking_in.action_confirm()
pack_obj = self.env['stock.quant.package']
pack1 = pack_obj.create({'name': 'PACKINOUTTEST1'})
pack2 = pack_obj.create({'name': 'PACKINOUTTEST2'})
picking_in.move_line_ids[0].result_package_id = pack1
picking_in.move_line_ids[0].qty_done = 120
packop2 = picking_in.move_line_ids[0].with_context(bypass_reservation_update=True).copy({'product_uom_qty': 0})
packop2.qty_done = 80
packop2.result_package_id = pack2
picking_in.action_done()
quants = self.env['stock.quant']._gather(self.productE, self.env['stock.location'].browse(self.stock_location))
self.assertEqual(sum([x.quantity for x in quants]), 200.0, 'Expecting 200 pieces in stock')
# Check the quants are in the package
self.assertEqual(sum(x.quantity for x in pack1.quant_ids), 120, 'Pack 1 should have 120 pieces')
self.assertEqual(sum(x.quantity for x in pack2.quant_ids), 80, 'Pack 2 should have 80 pieces')
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.productE.name,
'product_id': self.productE.id,
'product_uom_qty': 200,
'product_uom': self.productE.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.action_confirm()
picking_out.action_assign()
# Convert entire packs into taking out of packs
packout0 = picking_out.move_line_ids[0]
packout1 = picking_out.move_line_ids[1]
packout0.write({
'package_id': pack1.id,
'product_id': self.productE.id,
'qty_done': 120.0,
'product_uom_id': self.productE.uom_id.id,
})
packout1.write({
'package_id': pack2.id,
'product_id': self.productE.id,
'qty_done': 80.0,
'product_uom_id': self.productE.uom_id.id,
})
picking_out.action_done()
# Should be only 1 negative quant in supplier location
neg_quants = self.env['stock.quant'].search([('product_id', '=', self.productE.id), ('quantity', '<', 0.0)])
        self.assertEqual(len(neg_quants), 1, 'There should be 1 negative quant for the supplier!')
        self.assertEqual(neg_quants.location_id.id, self.supplier_location, 'The negative quant should be in the supplier location!')
        # We should also make sure that when matching stock moves with pack operations, it takes the correct quants.
quants = self.env['stock.quant']._gather(self.productE, self.env['stock.location'].browse(self.stock_location))
self.assertEqual(len(quants), 0, 'We should have no quants in the end')
def test_70_picking_state_all_at_once_reserve(self):
""" This test will check that the state of the picking is correctly computed according
to the state of its move lines and its move type.
"""
# move_type: direct == partial, one == all at once
# picking: confirmed == waiting availability
# -----------------------------------------------------------
# "all at once" and "reserve" scenario
# -----------------------------------------------------------
# get one product in stock
inventory = self.env['stock.inventory'].create({
'name': 'Inventory Product Table',
'filter': 'partial',
'line_ids': [(0, 0, {
'product_id': self.productA.id,
'product_uom_id': self.productA.uom_id.id,
'product_qty': 1,
'location_id': self.stock_location
})]
})
inventory.action_done()
# create a "all at once" delivery order for two products
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.move_type = 'one'
self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 2,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
        # check availability on this delivery order, it should stay in the waiting state
picking_out.action_assign()
self.assertEquals(picking_out.state, "confirmed")
# receive one product in stock
inventory = self.env['stock.inventory'].create({
'name': 'Inventory Product Table',
'filter': 'partial',
'line_ids': [(0, 0, {
'product_id': self.productA.id,
'product_uom_id': self.productA.uom_id.id,
'product_qty': 2,
'location_id': self.stock_location
})]
})
inventory.action_done()
# recheck availability of the delivery order, it should be assigned
picking_out.action_assign()
self.assertEquals(len(picking_out.move_lines), 1.0)
self.assertEquals(picking_out.move_lines.product_qty, 2.0)
self.assertEquals(picking_out.state, "assigned")
def test_71_picking_state_all_at_once_force_assign(self):
""" This test will check that the state of the picking is correctly computed according
to the state of its move lines and its move type.
"""
# move_type: direct == partial, one == all at once
# picking: confirmed == waiting availability, partially_available = partially available
# -----------------------------------------------------------
# "all at once" and "force assign" scenario
# -----------------------------------------------------------
# create a "all at once" delivery order for two products
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
        picking_out.move_type = 'one'
self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 2,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
        # check availability on this delivery order, it should stay in the waiting state
picking_out.action_assign()
self.assertEquals(picking_out.state, "confirmed")
# force assign on the delivery order, it should be assigned
picking_out.force_assign()
self.assertEquals(picking_out.state, "assigned")
def test_72_picking_state_partial_reserve(self):
""" This test will check that the state of the picking is correctly computed according
to the state of its move lines and its move type.
"""
# move_type: direct == partial, one == all at once
# picking: confirmed == waiting availability, partially_available = partially available
# -----------------------------------------------------------
# "partial" and "reserve" scenario
# -----------------------------------------------------------
# get one product in stock
inventory = self.env['stock.inventory'].create({
'name': 'Inventory Product Table',
'filter': 'partial',
'line_ids': [(0, 0, {
'product_id': self.productA.id,
'product_uom_id': self.productA.uom_id.id,
'product_qty': 1,
'location_id': self.stock_location
})]
})
inventory.action_done()
# create a "partial" delivery order for two products
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.move_type = 'direct'
self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 2,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
        # check availability on this delivery order, it should be partially available
picking_out.action_assign()
self.assertEquals(picking_out.state, "assigned")
# receive one product in stock
inventory = self.env['stock.inventory'].create({
'name': 'Inventory Product Table',
'filter': 'partial',
'line_ids': [(0, 0, {
'product_id': self.productA.id,
'product_uom_id': self.productA.uom_id.id,
'product_qty': 2,
'location_id': self.stock_location
})]
})
inventory.action_done()
# recheck availability of the delivery order, it should be assigned
picking_out.action_assign()
self.assertEquals(picking_out.state, "assigned")
def test_73_picking_state_partial_force_assign(self):
""" This test will check that the state of the picking is correctly computed according
to the state of its move lines and its move type.
"""
# move_type: direct == partial, one == all at once
# picking: confirmed == waiting availability, partially_available = partially available
# -----------------------------------------------------------
# "partial" and "force assign" scenario
# -----------------------------------------------------------
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
picking_out.move_type = 'direct'
self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 2,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
        # check availability on this delivery order, it should stay in the waiting state
picking_out.action_assign()
self.assertEquals(picking_out.state, "confirmed")
# force assign on the delivery order, it should be assigned
picking_out.force_assign()
self.assertEquals(picking_out.state, "assigned")
def test_74_move_state_waiting_mto(self):
""" This test will check that when a move is unreserved, its state changes to 'waiting' if
it has ancestors or if it has a 'procure_method' equal to 'make_to_order' else the state
changes to 'confirmed'.
"""
picking_out = self.PickingObj.create({
'partner_id': self.partner_agrolite_id,
'picking_type_id': self.picking_type_out,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
move_mto_alone = self.MoveObj.create({
'name': self.productA.name,
'product_id': self.productA.id,
'product_uom_qty': 2,
'product_uom': self.productA.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location,
'procure_method': 'make_to_order'})
move_with_ancestors = self.MoveObj.create({
'name': self.productB.name,
'product_id': self.productB.id,
'product_uom_qty': 2,
'product_uom': self.productB.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
self.MoveObj.create({
'name': self.productB.name,
'product_id': self.productB.id,
'product_uom_qty': 2,
'product_uom': self.productB.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location,
'move_dest_ids': [(4, move_with_ancestors.id, 0)]})
other_move = self.MoveObj.create({
'name': self.productC.name,
'product_id': self.productC.id,
'product_uom_qty': 2,
'product_uom': self.productC.uom_id.id,
'picking_id': picking_out.id,
'location_id': self.stock_location,
'location_dest_id': self.customer_location})
with self.assertRaises(UserError):
move_mto_alone._action_confirm()
move_with_ancestors._action_confirm()
other_move._action_confirm()
move_mto_alone._do_unreserve()
move_with_ancestors._do_unreserve()
other_move._do_unreserve()
self.assertEquals(move_mto_alone.state, "draft")
self.assertEquals(move_with_ancestors.state, "waiting")
self.assertEquals(other_move.state, "confirmed")
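        # Recap (added for clarity): a lone make-to-order move cannot be
        # confirmed without a source move (hence the UserError above); after
        # unreserving, a move with ancestors falls back to 'waiting', an
        # ordinary move to 'confirmed', and the unconfirmed MTO move stays
        # 'draft'.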
| agpl-3.0 | 2,654,378,991,845,158,400 | 58.446419 | 249 | 0.578258 | false |
alriddoch/cyphesis | rulesets/basic/mind/goals/common/move.py | 1 | 18663 | #This file is distributed under the terms of the GNU General Public license.
#Copyright (C) 1999 Aloril (See the file COPYING for details).
from common import const
from physics import *
from physics import Vector3D
from physics import Point3D
from mind.Goal import Goal
from mind.goals.common.common import *
from random import *
import types
############################ MOVE ME ####################################
class move_me(Goal):
"""Move me to a certain place."""
def __init__(self, location, speed=1):
Goal.__init__(self,"move me to certain place",
self.am_I_at_loc,
[self.move_to_loc])
self.location=location
self.speed=speed
self.vars=["location", "speed"]
def get_location_instance(self, me):
location_=self.location
if type(location_)==LambdaType:
#print "Lambda location"
location_=location_(me)
if type(location_)==StringType:
#print "String location"
location_=me.get_knowledge("location",location_)
if not location_:
#print "Unknown location"
return None
return location_
def am_I_at_loc(self, me):
location=self.get_location_instance(me)
if not location:
#print "No location"
return 1
if square_horizontal_distance(me.location, location) < 4: # 1.5 * 1.5
#print "We are there"
return 1
else:
#print "We are not there"
return 0
def move_to_loc(self, me):
#print "Moving to location"
location=self.get_location_instance(me)
if not location:
#print "but can't - not location"
return
# FIXME Destination based movement - currently won't work if
# a LOC change is required.
velocity=distance_to(me.location, location).unit_vector()*self.speed
if abs(velocity.z) > 0.99:
return
target=location.copy()
target.velocity=velocity
if me.location.velocity.is_valid() and me.location.velocity.dot(target.velocity) > 0.8:
#print "Already on the way"
return
return Operation("move", Entity(me.id, location=target))
############################ MOVE ME AREA ####################################
class move_me_area(Goal):
"""Move me to a certain area."""
def __init__(self, location, range=30):
Goal.__init__(self, "move me to certain area",
self.am_I_in_area,
[move_me(location),self.latch_loc])
self.location=location
self.range=range
self.square_range=range*range
self.arrived=0
self.vars=["location","range","arrived"]
def get_location_instance(self, me):
# FIXME Duplicate of method from move_me() goal
location_=self.location
if type(location_)==LambdaType:
#print "Lambda location"
location_=location_(me)
if type(location_)==StringType:
#print "String location"
location_=me.get_knowledge("location",location_)
if not location_:
#print "Unknown location"
return None
return location_
def am_I_in_area(self, me):
location=self.get_location_instance(me)
if not location:
#print "No location"
return 0
if self.arrived:
#print "Already arrived at location"
square_dist=square_distance(me.location, location)
if square_dist > self.square_range:
self.arrived=0
#print "Moved away"
return 0
else:
#print "Still here", square_dist, self.square_range
return 1
#print "I am not there"
return 0
def latch_loc(self, me):
#print "Latching at location"
self.arrived=1
############################ MOVE ME PLACE ####################################
class move_me_place(move_me):
"""Move me to a place by name."""
def __init__(self, what):
Goal.__init__(self, "move me to a place where I can get something",
self.am_I_at_loc,
[self.move_to_loc])
self.what = what
self.vars = ["what"]
def get_location_instance(self, me):
location = me.get_knowledge("place", self.what)
if type(location) == StringType:
location = me.get_knowledge("location", location)
if not location:
return None
return location
############################ MOVE THING ####################################
class move_it(Goal):
"""Move something to a place."""
def __init__(self, what, location, speed=0):
Goal.__init__(self,"move this to certain place",
self.is_it_at_loc,
[self.move_it_to_loc])
self.what=what
self.speed=speed
self.location=location
self.wait=0
self.vars=["what","location","speed","wait"]
def is_it_at_loc(self, me):
#CHEAT!: cludge
if self.wait>0:
return 0
if type(self.location)==StringType:
self.location=me.get_knowledge("location",self.location)
if not isLocation(self.location):
self.location=Location(self.location,Point3D(0.0,0.0,0.0))
if type(self.what)==StringType:
if me.things.has_key(self.what)==0: return 1
what=me.things[self.what][0]
if what.location.parent.id!=self.location.parent.id: return 0
return what.location.coordinates.distance(self.location.coordinates)<1.5
def move_it_to_loc(self, me):
if self.wait>0:
self.wait=self.wait-1
return
if type(self.location)==StringType:
self.location=me.get_knowledge("location",self.location)
elif not isLocation(self.location):
self.location=Location(self.location,Point3D(0.0,0.0,0.0))
if type(self.what)==StringType:
if me.things.has_key(self.what)==0:
return
what=me.things[self.what][0]
if self.speed==0 or what.location.parent.id!=self.location.parent.id:
return Operation("move", Entity(what.id, location=self.location))
iloc=what.location.copy()
vel=what.location.coordinates.unit_vector_to(self.location.coordinates)
iloc.velocity = vel * self.speed
self.location.velocity=Vector3D(0.0,0.0,0.0)
mOp1=Operation("move", Entity(what.id, location=iloc))
mOp2=Operation("move", Entity(what.id, location=self.location))
time=((self.location.coordinates-what.location.coordinates).mag() / self.speed)
self.wait=(time/const.basic_tick)+1
mOp2.setFutureSeconds(time)
return Oplist(mOp1,mOp2)
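# Note (added for clarity): with a nonzero speed, move_it returns two move
# ops in one Oplist -- an immediate one that starts the thing travelling at
# `vel * self.speed`, and a second scheduled `distance / speed` seconds ahead
# (via setFutureSeconds) that places it exactly on the destination;
# `self.wait` suppresses re-planning until that delayed op has landed.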
############################ MOVE THING FROM ME ####################################
class move_it_outof_me(Goal):
"""Put something down."""
def __init__(self, what):
Goal.__init__(self, "move this thing from my inventory and disown",
self.is_it_not_with_me,
[self.drop_it])
self.what=what
def is_it_not_with_me(self, me):
if me.things.has_key(self.what)==0: return 0
what=me.things[self.what][0]
return what.location.parent.id!=me.id
def drop_it(self, me):
if me.things.has_key(self.what)==0: return
what=me.things[self.what][0]
me.remove_thing(what)
return Operation("move", Entity(what.id, location=me.location))
############################ MOVE ME TO THING ##################################
class move_me_to_possession(Goal):
"""Move me to the same place as something I own."""
def __init__(self, what):
Goal.__init__(self,"move me to this thing",
self.am_i_at_it,
[self.move_me_to_it])
self.what=what
self.vars=["what"]
def am_i_at_it(self, me):
what = self.what
if type(what)==StringType:
if me.things.has_key(what)==0: return 0
what=me.things[what][0]
if square_horizontal_distance(me.location, what.location) < 4: # 2 * 2
return 1
else:
return 0
def move_me_to_it(self, me):
what = self.what
if type(what)==StringType:
if me.things.has_key(what)==0: return
what=me.things[what][0]
target=what.location.copy()
if target.parent.id==me.location.parent.id:
target.velocity=me.location.coordinates.unit_vector_to(target.coordinates)
target.rotation=target.velocity
return Operation("move", Entity(me.id, location=target))
class move_me_to_focus(Goal):
"""Move me to something I am interested in."""
def __init__(self, what):
Goal.__init__(self,"move me to this thing",
self.am_i_at_it,
[self.move_me_to_it])
if type(what) == types.ListType:
self.what = what
else:
self.what = [ what ]
self.vars=["what"]
def am_i_at_it(self, me):
for what in self.what:
id = me.get_knowledge('focus', what)
if id == None: continue
thing = me.map.get(id)
if thing == None:
me.remove_knowledge('focus', what)
continue
if square_horizontal_distance(me.location, thing.location) < 4:
return 1
return 0
def move_me_to_it(self, me):
for what in self.what:
id = me.get_knowledge('focus', what)
if id == None: continue
thing = me.map.get(id)
if thing == None:
me.remove_knowledge('focus', what)
return
target=thing.location.copy()
if target.parent.id==me.location.parent.id:
target.velocity=me.location.coordinates.unit_vector_to(target.coordinates)
return Operation("move", Entity(me.id, location=target))
############################ MOVE THING TO ME ####################################
class pick_up_possession(Goal):
"""Pick up something I own."""
def __init__(self, what):
Goal.__init__(self,"move this thing to my inventory (class)",
self.is_it_with_me,
[move_me_to_possession(what),
self.pick_it_up])
self.what=what
self.vars=["what"]
def is_it_with_me(self, me):
#CHEAT!: cludge
what=self.what
if type(what)==StringType:
if me.things.has_key(self.what)==0: return 0
what=me.things[self.what][0]
if what.location.parent.id!=me.id:
if what.location.parent.id!=me.location.parent.id:
me.remove_thing(what.id)
me.map.delete(what.id)
return what.location.parent.id==me.id
def pick_it_up(self, me):
what=self.what
if type(what)==StringType:
if me.things.has_key(self.what)==0: return 0
what=me.things[self.what][0]
return Operation("move", Entity(id, location=Location(me, Point3D(0,0,0))))
class pick_up_focus(Goal):
"""Pick up something I am interested in."""
def __init__(self, what):
Goal.__init__(self,"move this thing to my inventory (class)",
self.is_it_with_me,
[move_me_to_focus(what),
self.pick_it_up])
if type(what) == types.ListType:
self.what = what
else:
self.what = [ what ]
self.vars=["what"]
def is_it_with_me(self, me):
#CHEAT!: cludge
for what in self.what:
id=me.get_knowledge('focus', what)
if id == None: continue
thing = me.map.get(id)
if thing == None:
me.remove_knowledge('focus', what)
continue
            # If it's not near us on the ground, forget about it.
if thing.location.parent.id != me.location.parent.id:
me.remove_knowledge('focus', what)
continue
if thing.location.parent.id != me.id:
return 0
return 1
def pick_it_up(self, me):
for what in self.what:
id=me.get_knowledge('focus', what)
if id==None: continue
thing = me.map.get(id)
if thing == None:
me.remove_knowledge('focus', what)
continue
if thing.location.parent.id != me.id:
return Operation("move", Entity(id, location=Location(me, Point3D(0,0,0))))
############################ WANDER ####################################
class wander(Goal):
"""Move in a non-specific way."""
def __init__(self):
Goal.__init__(self,"wander randomly",false,[self.do_wandering])
def do_wandering(self, me):
#copied from build_home.find_place, but changed max amount to -5,5
loc = me.location.copy()
loc.coordinates=Point3D(map(lambda c:c+uniform(-5,5),
loc.coordinates))
ent=Entity(me,location=loc)
return Operation("move",ent)
############################ WANDER & SEARCH ############################
class search(Goal):
"""Move in a non-specific way looking for something."""
def __init__(self, what):
Goal.__init__(self, "search for a thing",
self.do_I_have,
                      [wander(),
spot_something(what, 30)])
# Long range for testing only
self.what=what
self.vars=["what"]
def do_I_have(self, me):
return me.things.has_key(self.what)==1
############################ PURSUIT ####################################
class pursuit(Goal):
"""avoid or hunt something at range"""
def __init__(self, desc, what, range, direction):
Goal.__init__(self,"avoid something",self.not_visible,[self.run])
self.what = what
self.range = range
self.direction = direction
self.vars=["what","range","direction"]
def not_visible(self, me):
#print self.__class__.__name__,me.mem.recall_place(me.location,self.range,self.what)
return not me.mem.recall_place(me.location,self.range,self.what)
def run(self, me):
lst_of_what = me.mem.recall_place(me.location,self.range,self.what)
if not lst_of_what or len(lst_of_what)==0: return
dist_vect=distance_to(me.location,lst_of_what[0].location).unit_vector()
multiply = const.base_velocity * self.direction * const.basic_tick
loc = Location(me.location.parent)
loc.coordinates = me.location.coordinates + (dist_vect * multiply)
ent=Entity(me.id,location=loc)
return Operation("move",ent)
############################ AVOID ####################################
class avoid(pursuit):
"""avoid something at range"""
def __init__(self, what, range):
pursuit.__init__(self,"avoid something",what,range,-1)
################################ HUNT ################################
class hunt(pursuit):
"""hunt something at range"""
def __init__(self, what, range):
pursuit.__init__(self,"hunt something",what,range,1)
class hunt_for(pursuit):
"""hunt something at range"""
def __init__(self, what, range, proximity=5):
Goal.__init__(self,"hunt for something",
self.in_range,
[self.run])
self.what = what
self.range = range
self.proximity = proximity
self.square_proximity = proximity*proximity
self.direction = 1
self.vars=["what","range","direction"]
def in_range(self,me):
id=me.get_knowledge('focus', self.what)
if id==None: return
thing=me.map.get(id)
if thing==None: return
square_dist = square_distance(me.location, thing.location)
return square_dist < self.square_proximity
################################ HUNT ################################
class patrol(Goal):
"""Move around an area defined by some waypoints."""
def __init__(self, whlist):
Goal.__init__(self, "patrol an area",
false,
[move_me(whlist[0]),
self.increment])
self.list = whlist
self.stage = 0
self.count = len(whlist)
self.vars = ["stage", "list"]
def increment(self, me):
self.stage = self.stage + 1
if self.stage >= self.count:
self.stage = 0
self.subgoals[0].location = self.list[self.stage]
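# Note (added for clarity): patrol reuses a single move_me subgoal and merely
# swaps its target each time a waypoint is reached, cycling through the list.
# Hypothetical usage: patrol(["gate", "well", "tower"])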
############################## ACCOMPANY ##############################
class accompany(Goal):
"""Move around staying close to someone."""
def __init__(self, who):
Goal.__init__(self, "stay with someone",
self.am_i_with,
[self.follow])
self.who=who
self.vars=["who"]
def am_i_with(self, me):
who=me.map.get(self.who)
if who == None:
self.irrelevant = 1
return 1
dist=distance_to(me.location, who.location)
# Are we further than 3 metres away
if dist.square_mag() > 25:
#print "We are far away", dist
if me.location.velocity.is_valid() and me.location.velocity.dot(dist) > 0.5:
#print "We moving towards them already"
return 1
return 0
else:
#print "We are close", dist
if me.location.velocity.is_valid() and me.location.velocity.dot(dist) < 0.5:
#print "We going away from them"
return 0
return 1
def follow(self, me):
who=me.map.get(self.who)
if who == None:
self.irrelevant = 1
return
dist=distance_to(me.location, who.location)
target = Location(me.location.parent)
square_dist=dist.square_mag()
if square_dist > 64:
#print "We must be far far away - run"
target.velocity = dist.unit_vector() * 3
elif square_dist > 25:
#print "We must be far away - walk"
target.velocity = dist.unit_vector()
else:
#print "We must be close - stop"
target.velocity = Vector3D(0,0,0)
return Operation("move", Entity(me.id, location=target))
| gpl-2.0 | -7,111,369,907,136,039,000 | 37.010183 | 95 | 0.530783 | false |
vanant/googleads-dfa-reporting-samples | python/v2.1/create_advertiser_group.py | 1 | 2002 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates an advertiser group.
Tags: advertiserGroups.insert
"""
__author__ = ('[email protected] (Jonathon Imperiosi)')
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to add an advertiser group for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
try:
# Construct and save advertiser.
advertiser_group = {
'name': 'Test Advertiser Group'
}
request = service.advertiserGroups().insert(
profileId=profile_id, body=advertiser_group)
# Execute request and print response.
response = request.execute()
print ('Created advertiser group with ID %s and name "%s".'
% (response['id'], response['name']))
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
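# Usage sketch (assumed from the argparse declaration above):
#   python create_advertiser_group.py <profile_id>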
| apache-2.0 | -9,044,761,426,972,332,000 | 28.880597 | 77 | 0.701798 | false |
hgschmie/presto | presto-docs/src/main/sphinx/conf.py | 1 | 2451 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Presto documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
import os
import sys
import xml.dom.minidom
try:
sys.dont_write_bytecode = True
except:
pass
sys.path.insert(0, os.path.abspath('ext'))
def child_node(node, name):
for i in node.childNodes:
if (i.nodeType == i.ELEMENT_NODE) and (i.tagName == name):
return i
return None
def node_text(node):
return node.childNodes[0].data
def maven_version(pom):
dom = xml.dom.minidom.parse(pom)
project = dom.childNodes[0]
version = child_node(project, 'version')
if version:
return node_text(version)
parent = child_node(project, 'parent')
version = child_node(parent, 'version')
return node_text(version)
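# maven_version() first looks for <project><version>, then falls back to
# <project><parent><version>; e.g. (hypothetical value) a pom.xml containing
# <parent><version>0.130-SNAPSHOT</version></parent> yields "0.130-SNAPSHOT".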
def get_version():
version = os.environ.get('PRESTO_VERSION', '').strip()
return version or maven_version('../../../pom.xml')
# -- General configuration -----------------------------------------------------
needs_sphinx = '1.1'
extensions = ['backquote', 'download', 'issue']
templates_path = ['templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Presto'
version = get_version()
release = version
exclude_patterns = ['_build']
highlight_language = 'sql'
default_role = 'backquote'
rst_epilog = """
.. |presto_server_release| replace:: ``presto-server-{release}``
""".replace('{release}', release)
# -- Options for HTML output ---------------------------------------------------
html_theme_path = ['themes']
html_theme = 'presto'
html_title = '%s %s Documentation' % (project, release)
html_logo = 'images/presto.svg'
html_add_permalinks = '#'
html_show_copyright = False
html_show_sphinx = False
html_sidebars = {
"**": ['logo-text.html', 'globaltoc.html', 'localtoc.html', 'searchbox.html']
}
html_theme_options = {
'base_url': '/',
}
| apache-2.0 | 5,452,581,341,700,879,000 | 21.906542 | 81 | 0.651163 | false |
apple/swift-lldb | packages/Python/lldbsuite/test/linux/mix-dwo-and-regular-objects/TestMixedDwarfBinary.py | 1 | 1626 | """ Testing debugging of a binary with "mixed" dwarf (with/without fission). """
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestMixedDwarfBinary(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
    @no_debug_info_test # Prevent the generation of the dwarf version of this test
@add_test_categories(["dwo"])
@skipUnlessPlatform(["linux"])
@skipIf(bugnumber="rdar://38550275")
def test_mixed_dwarf(self):
"""Test that 'frame variable' works
for the executable built from two source files compiled
        with/without -gsplit-dwarf, respectively."""
self.build()
exe = self.getBuildArtifact("a.out")
self.target = self.dbg.CreateTarget(exe)
self.assertTrue(self.target, VALID_TARGET)
main_bp = self.target.BreakpointCreateByName("g", "a.out")
self.assertTrue(main_bp, VALID_BREAKPOINT)
self.process = self.target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(self.process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
self.assertTrue(self.process.GetState() == lldb.eStateStopped,
STOPPED_DUE_TO_BREAKPOINT)
frame = self.process.GetThreadAtIndex(0).GetFrameAtIndex(0)
x = frame.FindVariable("x")
self.assertTrue(x.IsValid(), "x is not valid")
y = frame.FindVariable("y")
self.assertTrue(y.IsValid(), "y is not valid")
| apache-2.0 | 8,217,625,530,235,348,000 | 35.133333 | 83 | 0.661132 | false |
sharad/calibre | setup/git_post_checkout_hook.py | 1 | 1032 | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os, subprocess, sys
prev_rev, current_rev, flags = [x.decode('utf-8') if isinstance(x, bytes) else x for x in sys.argv[1:]]
def get_branch_name(rev):
return subprocess.check_output(['git', 'name-rev', '--name-only', rev]).decode('utf-8').strip()
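# For example, get_branch_name('HEAD') resolves the current revision to its
# symbolic name (e.g. 'master') via `git name-rev --name-only`.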
base = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.chdir(base)
if flags == '1': # A branch checkout
prev_branch, cur_branch = map(get_branch_name, (prev_rev, current_rev))
subprocess.check_call(['python', 'setup.py', 'gui', '--summary'])
# Remove .pyc files as some of them might have been orphaned
for dirpath, dirnames, filenames in os.walk('.'):
for f in filenames:
fpath = os.path.join(dirpath, f)
if f.endswith('.pyc'):
os.remove(fpath)
| gpl-3.0 | -5,469,249,687,992,142,000 | 35.857143 | 103 | 0.630814 | false |
Edraak/edx-platform | lms/djangoapps/courseware/courses.py | 1 | 15960 | """
Functions for accessing and displaying courses within the
courseware.
"""
from datetime import datetime
from collections import defaultdict
from fs.errors import ResourceNotFoundError
import logging
from path import Path as path
import pytz
from django.http import Http404
from django.conf import settings
from edxmako.shortcuts import render_to_string
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from static_replace import replace_static_urls
from xmodule.x_module import STUDENT_VIEW
from microsite_configuration import microsite
from courseware.access import has_access
from courseware.date_summary import (
CourseEndDate,
CourseStartDate,
TodaysDate,
VerificationDeadlineDate,
VerifiedUpgradeDeadlineDate,
)
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module
from lms.djangoapps.courseware.courseware_access_exception import CoursewareAccessException
from student.models import CourseEnrollment
import branding
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
def get_course(course_id, depth=0):
"""
Given a course id, return the corresponding course descriptor.
If the course does not exist, raises a ValueError. This is appropriate
for internal use.
depth: The number of levels of children for the modulestore to cache.
None means infinite depth. Default is to fetch no children.
"""
course = modulestore().get_course(course_id, depth=depth)
if course is None:
raise ValueError(u"Course not found: {0}".format(course_id))
return course
def get_course_by_id(course_key, depth=0):
"""
Given a course id, return the corresponding course descriptor.
If such a course does not exist, raises a 404.
depth: The number of levels of children for the modulestore to cache. None means infinite depth
"""
with modulestore().bulk_operations(course_key):
course = modulestore().get_course(course_key, depth=depth)
if course:
return course
else:
raise Http404("Course not found.")
class UserNotEnrolled(Http404):
def __init__(self, course_key):
super(UserNotEnrolled, self).__init__()
self.course_key = course_key
def get_course_with_access(user, action, course_key, depth=0, check_if_enrolled=False):
"""
Given a course_key, look up the corresponding course descriptor,
check that the user has the access to perform the specified action
on the course, and return the descriptor.
Raises a 404 if the course_key is invalid, or the user doesn't have access.
depth: The number of levels of children for the modulestore to cache. None means infinite depth
check_if_enrolled: If true, additionally verifies that the user is either enrolled in the course
or has staff access.
"""
course = get_course_by_id(course_key, depth)
check_course_access(course, user, action, check_if_enrolled)
return course
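# Illustrative call (argument values are hypothetical):
#   course = get_course_with_access(request.user, 'load', course_key,
#                                   depth=2, check_if_enrolled=True)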
def get_course_overview_with_access(user, action, course_key, check_if_enrolled=False):
"""
Given a course_key, look up the corresponding course overview,
check that the user has the access to perform the specified action
on the course, and return the course overview.
Raises a 404 if the course_key is invalid, or the user doesn't have access.
check_if_enrolled: If true, additionally verifies that the user is either enrolled in the course
or has staff access.
"""
try:
course_overview = CourseOverview.get_from_id(course_key)
except CourseOverview.DoesNotExist:
raise Http404("Course not found.")
check_course_access(course_overview, user, action, check_if_enrolled)
return course_overview
def check_course_access(course, user, action, check_if_enrolled=False):
"""
Check that the user has the access to perform the specified action
on the course (CourseDescriptor|CourseOverview).
check_if_enrolled: If true, additionally verifies that the user is either
enrolled in the course or has staff access.
"""
access_response = has_access(user, action, course, course.id)
if not access_response:
# Deliberately return a non-specific error message to avoid
# leaking info about access control settings
raise CoursewareAccessException(access_response)
if check_if_enrolled:
# Verify that the user is either enrolled in the course or a staff
# member. If user is not enrolled, raise UserNotEnrolled exception
# that will be caught by middleware.
if not ((user.id and CourseEnrollment.is_enrolled(user, course.id)) or has_access(user, 'staff', course)):
raise UserNotEnrolled(course.id)
def find_file(filesystem, dirs, filename):
"""
Looks for a filename in a list of dirs on a filesystem, in the specified order.
filesystem: an OSFS filesystem
dirs: a list of path objects
filename: a string
Returns d / filename if found in dir d, else raises ResourceNotFoundError.
"""
for directory in dirs:
filepath = path(directory) / filename
if filesystem.exists(filepath):
return filepath
raise ResourceNotFoundError(u"Could not find {0}".format(filename))
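# Illustrative use (paths are hypothetical): with
#   find_file(fs, [path("syllabus") / "run1", path("syllabus")], "syllabus.html")
# the run-specific directory is searched first and the first existing path wins.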
def get_course_about_section(request, course, section_key):
"""
This returns the snippet of html to be rendered on the course about page,
given the key for the section.
Valid keys:
- overview
- short_description
- description
- key_dates (includes start, end, exams, etc)
- video
- course_staff_short
- course_staff_extended
- requirements
- syllabus
- textbook
- faq
- effort
- more_info
- ocw_links
"""
# Many of these are stored as html files instead of some semantic
    # markup. This can change without affecting this interface when we find a
# good format for defining so many snippets of text/html.
html_sections = {
'short_description',
'description',
'key_dates',
'video',
'course_staff_short',
'course_staff_extended',
'requirements',
'syllabus',
'textbook',
'faq',
'more_info',
'overview',
'effort',
'end_date',
'prerequisites',
'ocw_links'
}
if section_key in html_sections:
try:
loc = course.location.replace(category='about', name=section_key)
# Use an empty cache
field_data_cache = FieldDataCache([], course.id, request.user)
about_module = get_module(
request.user,
request,
loc,
field_data_cache,
log_if_not_found=False,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path,
course=course
)
html = ''
if about_module is not None:
try:
html = about_module.render(STUDENT_VIEW).content
except Exception: # pylint: disable=broad-except
html = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course=%s, section_key=%s",
course, section_key
)
return html
except ItemNotFoundError:
log.warning(
u"Missing about section %s in course %s",
section_key, course.location.to_deprecated_string()
)
return None
raise KeyError("Invalid about key " + str(section_key))
def get_course_info_section_module(request, course, section_key):
"""
This returns the course info module for a given section_key.
Valid keys:
- handouts
- guest_handouts
- updates
- guest_updates
"""
usage_key = course.id.make_usage_key('course_info', section_key)
# Use an empty cache
field_data_cache = FieldDataCache([], course.id, request.user)
return get_module(
request.user,
request,
usage_key,
field_data_cache,
log_if_not_found=False,
wrap_xmodule_display=False,
static_asset_path=course.static_asset_path,
course=course
)
def get_course_info_section(request, course, section_key):
"""
This returns the snippet of html to be rendered on the course info page,
given the key for the section.
Valid keys:
- handouts
- guest_handouts
- updates
- guest_updates
"""
info_module = get_course_info_section_module(request, course, section_key)
html = ''
if info_module is not None:
try:
html = info_module.render(STUDENT_VIEW).content
except Exception: # pylint: disable=broad-except
html = render_to_string('courseware/error-message.html', None)
log.exception(
u"Error rendering course=%s, section_key=%s",
course, section_key
)
return html
def get_course_date_summary(course, user):
"""
Return the snippet of HTML to be included on the course info page
in the 'Date Summary' section.
"""
blocks = _get_course_date_summary_blocks(course, user)
return '\n'.join(
b.render() for b in blocks
)
def _get_course_date_summary_blocks(course, user):
"""
Return the list of blocks to display on the course info page,
sorted by date.
"""
block_classes = (
CourseEndDate,
CourseStartDate,
TodaysDate,
VerificationDeadlineDate,
VerifiedUpgradeDeadlineDate,
)
blocks = (cls(course, user) for cls in block_classes)
def block_key_fn(block):
"""
If the block's date is None, return the maximum datetime in order
to force it to the end of the list of displayed blocks.
"""
if block.date is None:
return datetime.max.replace(tzinfo=pytz.UTC)
return block.date
return sorted((b for b in blocks if b.is_enabled), key=block_key_fn)
# TODO: Fix this such that these are pulled in as extra course-specific tabs.
# arjun will address this by the end of October if no one does so prior to
# then.
def get_course_syllabus_section(course, section_key):
"""
This returns the snippet of html to be rendered on the syllabus page,
given the key for the section.
Valid keys:
- syllabus
- guest_syllabus
"""
# Many of these are stored as html files instead of some semantic
    # markup. This can change without affecting this interface when we find a
# good format for defining so many snippets of text/html.
if section_key in ['syllabus', 'guest_syllabus']:
try:
filesys = course.system.resources_fs
# first look for a run-specific version
dirs = [path("syllabus") / course.url_name, path("syllabus")]
filepath = find_file(filesys, dirs, section_key + ".html")
with filesys.open(filepath) as html_file:
return replace_static_urls(
html_file.read().decode('utf-8'),
getattr(course, 'data_dir', None),
course_id=course.id,
static_asset_path=course.static_asset_path,
)
except ResourceNotFoundError:
log.exception(
u"Missing syllabus section %s in course %s",
section_key, course.location.to_deprecated_string()
)
return "! Syllabus missing !"
raise KeyError("Invalid about key " + str(section_key))
def get_courses(user, org=None, filter_=None):
"""
Returns a list of courses available, sorted by course.number and optionally
filtered by org code (case-insensitive).
"""
courses = branding.get_visible_courses(org=org, filter_=filter_)
permission_name = microsite.get_value(
'COURSE_CATALOG_VISIBILITY_PERMISSION',
settings.COURSE_CATALOG_VISIBILITY_PERMISSION
)
courses = [c for c in courses if has_access(user, permission_name, c)]
return courses
def get_specialization_courses(org=None, filter_=None):
"""
Returns a list of courses available, sorted by course.number and optionally
filtered by org code (case-insensitive).
"""
courses = branding.get_visible_courses(org=org, filter_=filter_)
return courses
def get_permission_for_course_about():
"""
Returns the CourseOverview object for the course after checking for access.
"""
return microsite.get_value(
'COURSE_ABOUT_VISIBILITY_PERMISSION',
settings.COURSE_ABOUT_VISIBILITY_PERMISSION
)
def sort_by_announcement(courses):
"""
Sorts a list of courses by their announcement date. If the date is
not available, sort them by their start date.
"""
# Sort courses by how far are they from they start day
key = lambda course: course.sorting_score
courses = sorted(courses, key=key)
return courses
def sort_by_start_date(courses):
"""
Returns a list of courses sorted by their start date, latest first.
"""
courses = sorted(
courses,
key=lambda course: (course.has_ended(), course.start is None, course.start),
reverse=False
)
return courses
def get_cms_course_link(course, page='course'):
"""
Returns a link to course_index for editing the course in cms,
assuming that the course is actually cms-backed.
"""
# This is fragile, but unfortunately the problem is that within the LMS we
# can't use the reverse calls from the CMS
return u"//{}/{}/{}".format(settings.CMS_BASE, page, unicode(course.id))
def get_cms_block_link(block, page):
"""
Returns a link to block_index for editing the course in cms,
assuming that the block is actually cms-backed.
"""
# This is fragile, but unfortunately the problem is that within the LMS we
# can't use the reverse calls from the CMS
return u"//{}/{}/{}".format(settings.CMS_BASE, page, block.location)
def get_studio_url(course, page):
"""
Get the Studio URL of the page that is passed in.
Args:
course (CourseDescriptor)
"""
is_studio_course = course.course_edit_method == "Studio"
is_mongo_course = modulestore().get_modulestore_type(course.id) != ModuleStoreEnum.Type.xml
studio_link = None
if is_studio_course and is_mongo_course:
studio_link = get_cms_course_link(course, page)
return studio_link
def get_problems_in_section(section):
"""
This returns a dict having problems in a section.
Returning dict has problem location as keys and problem
descriptor as values.
"""
problem_descriptors = defaultdict()
if not isinstance(section, UsageKey):
section_key = UsageKey.from_string(section)
else:
section_key = section
# it will be a Mongo performance boost, if you pass in a depth=3 argument here
# as it will optimize round trips to the database to fetch all children for the current node
section_descriptor = modulestore().get_item(section_key, depth=3)
# iterate over section, sub-section, vertical
for subsection in section_descriptor.get_children():
for vertical in subsection.get_children():
for component in vertical.get_children():
if component.location.category == 'problem' and getattr(component, 'has_score', False):
problem_descriptors[unicode(component.location)] = component
return problem_descriptors
| agpl-3.0 | -754,830,506,233,932,700 | 31.177419 | 114 | 0.656203 | false |
derickc/Fountainhead | autoscroll.py | 1 | 14453 | import sublime
import sublime_plugin
try:
from . import scopes
except (ImportError, ValueError):
import scopes
fountain_scope = scopes.fountain_scope
action_scope = scopes.action_scope
boneyard_scope = scopes.boneyard_scope
dialogue_scope = scopes.dialogue_scope
lyrics_scope = scopes.lyrics_scope
character_scope = scopes.character_scope
parenthetical_scope = scopes.parenthetical_scope
note_scope = scopes.note_scope
scene_scope = scopes.scene_scope
character_list_scope = scopes.character_list_scope
section_scope = scopes.section_scope
synopses_scope = scopes.synopses_scope
pagebreak_scope = scopes.pagebreak_scope
title_page_scope = scopes.title_page_scope
center_scope = scopes.center_scope
transition_scope = scopes.transition_scope
class AutoScrollCommand(sublime_plugin.EventListener):
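    # Auto-scrolls the view "typewriter" style: once the caret gets within about
    # two line-heights of the bottom of the viewport, modified_scroll walks back
    # up to rowCounterLimit rows to find the start of the previous Fountain
    # element (scene heading, character cue, action, note, synopsis, section or a
    # blank-line boundary). If that element lies within lineAmount lines of the
    # caret, the view is scrolled so the element sits just below the viewport top;
    # otherwise (or when nothing is found) the view is scrolled so that only
    # scrollAmount lines remain above the caret.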
# pagesWritten = 0
# def on_activated(self, view):
# if 'Fountainhead.tmLanguage' in view.settings().get('syntax'):
# if view.settings().get('auto_scroll', True):
# view.set_status('AutoScrollCommand',
# 'Pages written: %d' % self.pagesWritten)
def modified_scroll(self, view):
if view.settings().get('syntax') == 'Packages/Fountainhead/Fountainhead.tmLanguage':
# if 'Fountainhead.tmLanguage' in view.settings().get('syntax'):
# if sublime.load_settings('Fountainhead.sublime-settings').get('auto_scroll', True):
if view.settings().get('auto_scroll', True):
self.currentY = view.text_to_layout(view.sel()[0].begin())[1]
self.viewportY = view.viewport_position()[1]
self.viewportHeight = view.viewport_extent()[1]
self.lineHeight = view.line_height()
self.pageLines = ((self.currentY - self.viewportY) /
self.lineHeight)
self.rowCounter = 1
self.stopCounter = 0
# sets how many rows to look for a previous element (a row can be many lines)
self.rowCounterLimit = 8
# sets the threshold on how many lines to look for a previous element
self.lineAmount = 8
# sets how many lines to scroll up if scrolling to the previous element is too much
self.scrollAmount = 6
if (self.currentY >= (self.viewportY + self.viewportHeight -
(1.9 * self.lineHeight))):
self.rowCounter = 1
while (self.rowCounter <= self.rowCounterLimit and
self.stopCounter == 0):
self.currentRow = (view.rowcol(view.sel()[0].begin()))[0]
self.scope = view.scope_name(view.text_point((self.currentRow - self.rowCounter - 1), 0))
# Needed?
# if (self.scope == 'text.fountain keyword entity.other.attribute-name '):
# self.rowCounter += 1
# Needed?
# elif (self.scope == 'text.fountain keyword '):
# self.rowCounter += 1
# if (self.scope == 'text.fountain ') and (view.text_point((self.currentRow - self.rowCounter), 0) == view.text_point((self.currentRow - self.rowCounter - 1), 1)):
if (self.scope == fountain_scope) and (view.text_point((self.currentRow - self.rowCounter), 0) == view.text_point((self.currentRow - self.rowCounter - 1), 1)):
self.rowCounter += 1
# Scene Heading
# elif (self.scope == 'text.fountain entity.name.function '):
elif (self.scope == fountain_scope + scene_scope):
self.newY = view.text_to_layout((view.text_point((self.currentRow - self.rowCounter - 1), 0)))[1]
if (((self.currentY - self.newY) / self.lineHeight) > self.lineAmount):
view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
else:
view.run_command('scroll_lines', {"amount": -(((self.newY - self.viewportY) / self.lineHeight) - (0.5))})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
# Character Name
# elif (self.scope == 'text.fountain string entity.name.class '):
elif (self.scope == fountain_scope + dialogue_scope + character_scope):
self.newY = view.text_to_layout((view.text_point((self.currentRow - self.rowCounter - 1), 0)))[1]
if (((self.currentY - self.newY) / self.lineHeight) > self.lineAmount):
view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
else:
view.run_command('scroll_lines', {"amount": -(((self.newY - self.viewportY) / self.lineHeight) - (0.5))})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
# Action
# elif (self.scope == 'text.fountain foreground '):
elif (self.scope == fountain_scope + action_scope):
self.newY = view.text_to_layout((view.text_point((self.currentRow - self.rowCounter - 1), 0)))[1]
if (((self.currentY - self.newY) / self.lineHeight) > self.lineAmount):
view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
else:
view.run_command('scroll_lines', {"amount": -(((self.newY - self.viewportY) / self.lineHeight) - (0.5))})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
# Notes
# elif (self.scope == 'text.fountain variable.parameter '):
elif (self.scope == fountain_scope + note_scope):
self.newY = view.text_to_layout((view.text_point((self.currentRow - self.rowCounter - 1), 0)))[1]
if (((self.currentY - self.newY) / self.lineHeight) > self.lineAmount):
view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
else:
view.run_command('scroll_lines', {"amount": -(((self.newY - self.viewportY) / self.lineHeight) - (0.5))})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
# Synopses
# elif (self.scope == 'text.fountain meta.diff '):
elif (self.scope == fountain_scope + synopses_scope):
self.newY = view.text_to_layout((view.text_point((self.currentRow - self.rowCounter - 1), 0)))[1]
if (((self.currentY - self.newY) / self.lineHeight) > self.lineAmount):
view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
else:
view.run_command('scroll_lines', {"amount": -(((self.newY - self.viewportY) / self.lineHeight) - (0.5))})
# view.run_command('scroll_lines', {"amount": -(self.pageLines - (self.rowCounter + 0.5)) })
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
# elif (self.scope == 'text.fountain '):
elif (self.scope == fountain_scope):
self.newY = view.text_to_layout((view.text_point((self.currentRow - self.rowCounter - 1), 0)))[1]
if (((self.currentY - self.newY) / self.lineHeight) > self.lineAmount):
view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
else:
view.run_command('scroll_lines', {"amount": -(((self.newY - self.viewportY) / self.lineHeight) - (0.5))})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
# Section
# elif (self.scope == 'text.fountain entity.name.filename '):
elif (self.scope == fountain_scope + section_scope):
self.newY = view.text_to_layout((view.text_point((self.currentRow - self.rowCounter - 1), 0)))[1]
if (((self.currentY - self.newY) / self.lineHeight) > self.lineAmount):
view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
else:
view.run_command('scroll_lines', {"amount": -(((self.newY - self.viewportY) / self.lineHeight) - (0.5))})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
self.rowCounter = 1
# Boneyard
# elif (self.scope == 'text.fountain comment '):
# self.newY = view.text_to_layout((view.text_point((self.currentRow - self.rowCounter - 1), 0)))[1]
# if (((self.currentY - self.newY) / self.lineHeight) > self.lineAmount):
# view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
# self.stopCounter = 1
# self.rowCounter = 1
# else:
# view.run_command('scroll_lines', {"amount": -(((self.newY - self.viewportY) / self.lineHeight) - (0.5))})
# self.pagesWritten += 1
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
# self.stopCounter = 1
# self.rowCounter = 1
else:
self.rowCounter += 1
while ((self.rowCounter > self.rowCounterLimit) and self.stopCounter == 0):
view.run_command('scroll_lines', {"amount": -(self.pageLines - self.scrollAmount)})
# self.pagesWritten += 1
# Will keep track of written pages in preview script
# view.set_status('AutoScrollCommand', 'Pages written: %d' % self.pagesWritten)
self.stopCounter = 1
def on_modified_async(self, view):
if int(sublime.version()) >= 3000:
self.modified_scroll(view)
def on_modified(self, view):
if int(sublime.version()) < 3000:
self.modified_scroll(view)
| mit | 9,084,637,440,702,332,000 | 66.537383 | 187 | 0.478447 | false |
maciejkula/binge | binge/models.py | 1 | 15339 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable, Function
from binge.layers import ScaledEmbedding, ZeroEmbedding
from binge.native import align, get_lib
def _gpu(tensor, gpu=False):
if gpu:
return tensor.cuda()
else:
return tensor
def _cpu(tensor):
if tensor.is_cuda:
return tensor.cpu()
else:
return tensor
def _minibatch(tensor, batch_size):
for i in range(0, len(tensor), batch_size):
yield tensor[i:i + batch_size]
def binarize_array(array):
assert array.shape[1] % 8 == 0
array = (np.sign(array) > 0.0).astype(np.bool)
array = np.packbits(array, axis=1)
return array
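# binarize_array keeps only the sign of each weight and packs 8 signs per byte
# with np.packbits; e.g. a float32 row of 64 values becomes 8 uint8 bytes, a
# 32x reduction in memory.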
class BinaryDot(Function):
def forward(self, x, y):
x_scale = x.abs().mean(1)
y_scale = y.abs().mean(1)
sign_x = x.sign()
sign_y = y.sign()
xnor = sign_x * sign_y
self.save_for_backward(x, y)
return xnor.sum(1) * x_scale * y_scale
def backward(self, grad_output):
x, y = self.saved_tensors
embedding_dim = x.size()[1]
grad_output = grad_output.expand_as(x)
x_scale = x.abs().mean(1).expand_as(x)
y_scale = y.abs().mean(1).expand_as(y)
sign_x = x.sign()
sign_y = y.sign()
dx_dsign = (x.abs() <= 1.0).float()
dy_dsign = (y.abs() <= 1.0).float()
grads = (grad_output * sign_y * y_scale *
(1.0 / embedding_dim + dx_dsign * x_scale),
grad_output * sign_x * x_scale *
(1.0 / embedding_dim + dy_dsign * y_scale))
return grads
def binary_dot(x, y):
return BinaryDot()(x, y)
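# binary_dot approximates the dot product in the XNOR-net style:
#   dot(x, y) ~= mean(|x|) * mean(|y|) * sum(sign(x) * sign(y))
# Its backward pass uses a straight-through estimator: the gradient of sign() is
# treated as 1 where |value| <= 1 and 0 elsewhere (the dx_dsign / dy_dsign masks),
# plus a 1/embedding_dim term contributed by the mean-absolute-value scale factors.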
class BilinearNet(nn.Module):
def __init__(self,
num_users,
num_items,
embedding_dim,
xnor=False,
sparse=False):
super().__init__()
self.xnor = xnor
self.embedding_dim = embedding_dim
self.user_embeddings = ScaledEmbedding(num_users, embedding_dim,
sparse=sparse)
self.item_embeddings = ScaledEmbedding(num_items, embedding_dim,
sparse=sparse)
self.user_biases = ZeroEmbedding(num_users, 1, sparse=sparse)
self.item_biases = ZeroEmbedding(num_items, 1, sparse=sparse)
def forward(self, user_ids, item_ids):
user_embedding = self.user_embeddings(user_ids)
item_embedding = self.item_embeddings(item_ids)
user_embedding = user_embedding.view(-1, self.embedding_dim)
item_embedding = item_embedding.view(-1, self.embedding_dim)
user_bias = self.user_biases(user_ids).view(-1, 1)
item_bias = self.item_biases(item_ids).view(-1, 1)
if self.xnor:
dot = binary_dot(user_embedding, item_embedding)
else:
dot = (user_embedding * item_embedding).sum(1)
return dot + user_bias + item_bias
class FactorizationModel(object):
"""
A number of classic factorization models, implemented in PyTorch.
    Available loss functions:
    - pointwise: pointwise logistic loss
    - bpr: Rendle's personalized Bayesian ranking loss
    - adaptive: a variant of WARP with adaptive selection of negative samples
    (regression and truncated_regression losses are not implemented in this
    class; the constructor only accepts the three values above)
Performance notes: neural network toolkits do not perform well on sparse tasks
like recommendations. To achieve acceptable speed, either use the `sparse` option
on a CPU or use CUDA with very big minibatches (1024+).
"""
def __init__(self,
loss='pointwise',
xnor=False,
embedding_dim=64,
n_iter=3,
batch_size=64,
l2=0.0,
learning_rate=1e-3,
use_cuda=False,
sparse=False,
random_seed=None):
assert loss in ('pointwise',
'bpr',
'adaptive')
self._loss = loss
self._embedding_dim = embedding_dim
self._n_iter = n_iter
self._batch_size = batch_size
self._l2 = l2
self._learning_rate = learning_rate
self._use_cuda = use_cuda
self._sparse = sparse
self._xnor = xnor
self._random_state = np.random.RandomState(random_seed)
self._num_users = None
self._num_items = None
self._net = None
def get_params(self):
return {'loss': self._loss,
'embedding_dim': self._embedding_dim,
'n_iter': self._n_iter,
'batch_size': self._batch_size,
'l2': self._l2,
'learning_rate': self._learning_rate,
'use_cuda': self._use_cuda,
'xnor': self._xnor}
def _pointwise_loss(self, users, items, ratings):
negatives = Variable(
_gpu(
torch.from_numpy(self._random_state.randint(0,
self._num_items,
len(users))),
self._use_cuda)
)
positives_loss = (1.0 - F.sigmoid(self._net(users, items)))
negatives_loss = F.sigmoid(self._net(users, negatives))
return torch.cat([positives_loss, negatives_loss]).mean()
def _bpr_loss(self, users, items, ratings):
negatives = Variable(
_gpu(
torch.from_numpy(self._random_state.randint(0,
self._num_items,
len(users))),
self._use_cuda)
)
return (1.0 - F.sigmoid(self._net(users, items) -
self._net(users, negatives))).mean()
def _adaptive_loss(self, users, items, ratings, n_neg_candidates=5):
negatives = Variable(
_gpu(
torch.from_numpy(
self._random_state.randint(0, self._num_items,
(len(users), n_neg_candidates))),
self._use_cuda)
)
negative_predictions = self._net(
users.repeat(n_neg_candidates, 1).transpose(0,1),
negatives
).view(-1, n_neg_candidates)
best_negative_prediction, _ = negative_predictions.max(1)
positive_prediction = self._net(users, items)
return torch.mean(torch.clamp(best_negative_prediction -
positive_prediction
+ 1.0, 0.0))
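    # The adaptive loss is WARP-like: for every positive pair it samples
    # n_neg_candidates random items, scores them, keeps only the highest-scoring
    # ("hardest") negative and applies a margin-1 hinge loss against the positive.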
def _shuffle(self, interactions):
users = interactions.row
items = interactions.col
ratings = interactions.data
shuffle_indices = np.arange(len(users))
self._random_state.shuffle(shuffle_indices)
return (users[shuffle_indices].astype(np.int64),
items[shuffle_indices].astype(np.int64),
ratings[shuffle_indices].astype(np.float32))
def fit(self, interactions, verbose=False):
"""
Fit the model.
Arguments
---------
interactions: np.float32 coo_matrix of shape [n_users, n_items]
the matrix containing
user-item interactions.
verbose: Bool, optional
Whether to print epoch loss statistics.
"""
self._num_users, self._num_items = interactions.shape
self._net = _gpu(
BilinearNet(self._num_users,
self._num_items,
self._embedding_dim,
xnor=self._xnor,
sparse=self._sparse),
self._use_cuda
)
optimizer = optim.Adam(self._net.parameters(),
lr=self._learning_rate,
weight_decay=self._l2)
if self._loss == 'pointwise':
loss_fnc = self._pointwise_loss
elif self._loss == 'bpr':
loss_fnc = self._bpr_loss
else:
loss_fnc = self._adaptive_loss
for epoch_num in range(self._n_iter):
users, items, ratings = self._shuffle(interactions)
user_ids_tensor = _gpu(torch.from_numpy(users),
self._use_cuda)
item_ids_tensor = _gpu(torch.from_numpy(items),
self._use_cuda)
ratings_tensor = _gpu(torch.from_numpy(ratings),
self._use_cuda)
epoch_loss = 0.0
for (batch_user,
batch_item,
batch_ratings) in zip(_minibatch(user_ids_tensor,
self._batch_size),
_minibatch(item_ids_tensor,
self._batch_size),
_minibatch(ratings_tensor,
self._batch_size)):
user_var = Variable(batch_user)
item_var = Variable(batch_item)
ratings_var = Variable(batch_ratings)
optimizer.zero_grad()
loss = loss_fnc(user_var, item_var, ratings_var)
epoch_loss += loss.data[0]
loss.backward()
# return loss
optimizer.step()
if verbose:
print('Epoch {}: loss {}'.format(epoch_num, epoch_loss))
def predict(self, user_ids, item_ids=None):
"""
Compute the recommendation score for user-item pairs.
Arguments
---------
user_ids: integer or np.int32 array of shape [n_pairs,]
single user id or an array containing the user ids for the user-item pairs for which
a prediction is to be computed
item_ids: optional, np.int32 array of shape [n_pairs,]
an array containing the item ids for the user-item pairs for which
a prediction is to be computed. If not provided, scores for
all items will be computed.
"""
if item_ids is None:
item_ids = np.arange(self._num_items, dtype=np.int64)
if isinstance(user_ids, int):
user_id = user_ids
user_ids = np.empty_like(item_ids)
user_ids.fill(user_id)
user_ids = torch.from_numpy(user_ids.reshape(-1, 1).astype(np.int64))
item_ids = torch.from_numpy(item_ids.reshape(-1, 1).astype(np.int64))
user_var = Variable(_gpu(user_ids, self._use_cuda))
item_var = Variable(_gpu(item_ids, self._use_cuda))
out = self._net(user_var, item_var)
return _cpu(out.data).numpy().flatten()
def get_scorer(self):
get_param = lambda l: _cpu([x for x in l.parameters()][0]).data.numpy().squeeze()
if self._xnor:
return XNORScorer(get_param(self._net.user_embeddings),
get_param(self._net.user_biases),
get_param(self._net.item_embeddings),
get_param(self._net.item_biases))
else:
return Scorer(get_param(self._net.user_embeddings),
get_param(self._net.user_biases),
get_param(self._net.item_embeddings),
get_param(self._net.item_biases))
class Scorer:
def __init__(self,
user_vectors,
user_biases,
item_vectors,
item_biases):
self._user_vectors = align(user_vectors)
self._user_biases = align(user_biases)
self._item_vectors = align(item_vectors)
self._item_biases = align(item_biases)
self._lib = get_lib()
def _parameters(self):
return (self._user_vectors,
self._item_vectors,
self._user_biases,
self._item_biases)
def predict(self, user_id, item_ids=None):
if item_ids is None:
item_ids = slice(0, None, None)
return self._lib.predict_float_256(
align(self._user_vectors[user_id]),
self._item_vectors[item_ids],
self._user_biases[user_id],
self._item_biases[item_ids])
def _predict_bench(self, user_id, out):
return self._lib.predict_float_256(
align(self._user_vectors[user_id]),
self._item_vectors,
self._user_biases[user_id],
self._item_biases,
out)
def memory(self):
get_size = lambda x: x.itemsize * x.size
return sum(get_size(x) for x in self._parameters())
class XNORScorer:
def __init__(self,
user_vectors,
user_biases,
item_vectors,
item_biases):
assert item_vectors.shape[1] >= 32
self._user_norms = align(np.abs(user_vectors).mean(axis=1))
self._item_norms = align(np.abs(item_vectors).mean(axis=1))
self._user_vectors = align(binarize_array(user_vectors))
self._user_biases = align(user_biases)
self._item_vectors = align(binarize_array(item_vectors))
self._item_biases = align(item_biases)
self._lib = get_lib()
def _parameters(self):
return (self._user_norms,
self._item_norms,
self._user_vectors,
self._item_vectors,
self._user_biases,
self._item_biases)
def predict(self, user_id, item_ids=None):
if item_ids is None:
item_ids = slice(0, None, None)
return self._lib.predict_xnor_256(
align(self._user_vectors[user_id]),
self._item_vectors[item_ids],
self._user_biases[user_id],
self._item_biases[item_ids],
self._user_norms[user_id],
self._item_norms[item_ids])
def _predict_bench(self, user_id, out):
return self._lib.predict_xnor_256(
align(self._user_vectors[user_id]),
self._item_vectors,
self._user_biases[user_id],
self._item_biases,
self._user_norms[user_id],
self._item_norms,
out)
def memory(self):
get_size = lambda x: x.itemsize * x.size
return sum(get_size(x) for x in self._parameters())
class PopularityModel:
def __init__(self):
self._popularity = None
def fit(self, interactions):
self._popularity = interactions.getnnz(axis=0).astype(np.float32)
assert len(self._popularity) == interactions.shape[1]
def predict(self, user_ids, item_ids=None):
if item_ids is not None:
return self._popularity[item_ids]
else:
return self._popularity
| apache-2.0 | -5,505,224,720,463,385,000 | 29.374257 | 97 | 0.519525 | false |
samcheck/Scripts | tal/dlTALv2.py | 1 | 1298 | #! python3
# dlTALv2.py - Downloads a specific range of "This American Life" episodes
import requests, os
from bs4 import BeautifulSoup
# Starting URL
url = 'http://www.podtrac.com/pts/redirect.mp3/podcast.thisamericanlife.org/extended/'
# 'http://audio.thisamericanlife.org/jomamashouse/ismymamashouse/'
# 'http://www.thisamericanlife.org/sites/all/download.php?ep='
url_title = 'http://www.thisamericanlife.org/radio-archives/episode/'
# Range
dl_start = 550
dl_end = 560
exten = '.mp3'
# Place to store podcasts
os.makedirs('TAL', exist_ok=True)
for ep in range(dl_start, (dl_end + 1)):
# Create unique URL for each episode
url_ep = url + str(ep) + exten
url_name = url_title + str(ep)
# Pull name of episode
res = requests.get(url_name)
res.raise_for_status()
soup = BeautifulSoup(res.text, 'html.parser')
# Find title and extract w/ clean up of ':'
save_name = soup.find('h1', class_='node-title').string.replace(':','')
# Download the episode
print('Downloading %s...' % url_ep)
res = requests.get(url_ep)
res.raise_for_status()
# Save the file to ./TAL
audio_file = open(os.path.join('TAL', '#' + save_name + exten), 'wb')
for chunk in res.iter_content(100000):
audio_file.write(chunk)
audio_file.close()
print('Done.')
| mit | 2,411,798,419,417,730,000 | 26.844444 | 86 | 0.669492 | false |
mhcrnl/PmwTkEx | src/Pmw/Pmw_1_3/demos/ButtonBox.py | 1 | 1495 | title = 'Pmw.ButtonBox demonstration'
# Import Pmw from this directory tree.
import sys
sys.path[:0] = ['../../..']
import Tkinter
import Pmw
class Demo:
def __init__(self, parent):
# Create and pack the ButtonBox.
self.buttonBox = Pmw.ButtonBox(parent,
labelpos = 'nw',
label_text = 'ButtonBox:',
frame_borderwidth = 2,
frame_relief = 'groove')
self.buttonBox.pack(fill = 'both', expand = 1, padx = 10, pady = 10)
# Add some buttons to the ButtonBox.
self.buttonBox.add('OK', command = self.ok)
self.buttonBox.add('Apply', command = self.apply)
self.buttonBox.add('Cancel', command = self.cancel)
# Set the default button (the one executed when <Return> is hit).
self.buttonBox.setdefault('OK')
parent.bind('<Return>', self._processReturnKey)
parent.focus_set()
# Make all the buttons the same width.
self.buttonBox.alignbuttons()
def _processReturnKey(self, event):
self.buttonBox.invoke()
def ok(self):
print 'You clicked on OK'
def apply(self):
print 'You clicked on Apply'
def cancel(self):
print 'You clicked on Cancel'
######################################################################
# Create demo in root window for testing.
if __name__ == '__main__':
root = Tkinter.Tk()
Pmw.initialise(root)
root.title(title)
exitButton = Tkinter.Button(root, text = 'Exit', command = root.destroy)
exitButton.pack(side = 'bottom')
widget = Demo(root)
root.mainloop()
| apache-2.0 | -2,719,902,525,558,238,700 | 25.696429 | 76 | 0.62408 | false |
sileht/python-gnocchiclient | gnocchiclient/v1/metric_cli.py | 1 | 15046 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import iso8601
import json
import logging
import sys
from cliff import command
from cliff import lister
from cliff import show
from gnocchiclient import utils
LOG_DEP = logging.getLogger('deprecated')
class CliMetricWithResourceID(command.Command):
def get_parser(self, prog_name):
parser = super(CliMetricWithResourceID, self).get_parser(prog_name)
parser.add_argument("--resource-id", "-r",
help="ID of the resource")
return parser
class CliMetricList(lister.Lister):
"""List metrics"""
COLS = ('id', 'archive_policy/name', 'name', 'unit', 'resource_id')
def get_parser(self, prog_name):
parser = super(CliMetricList, self).get_parser(prog_name)
parser.add_argument("--limit", type=int, metavar="<LIMIT>",
help="Number of metrics to return "
"(Default is server default)")
parser.add_argument("--marker", metavar="<MARKER>",
help="Last item of the previous listing. "
"Return the next results after this value")
parser.add_argument("--sort", action="append", metavar="<SORT>",
help="Sort of metric attribute "
"(example: user_id:desc-nullslast")
return parser
def take_action(self, parsed_args):
metrics = utils.get_client(self).metric.list(
**utils.get_pagination_options(parsed_args))
for metric in metrics:
utils.format_archive_policy(metric["archive_policy"])
utils.format_move_dict_to_root(metric, "archive_policy")
return utils.list2cols(self.COLS, metrics)
class DeprecatedCliMetricList(CliMetricList):
"""Deprecated: List metrics"""
def take_action(self, parsed_args):
LOG_DEP.warning('This command has been deprecated. '
'Please use "metric list" instead.')
return super(DeprecatedCliMetricList, self).take_action(parsed_args)
class CliMetricShow(CliMetricWithResourceID, show.ShowOne):
"""Show a metric"""
def get_parser(self, prog_name):
parser = super(CliMetricShow, self).get_parser(prog_name)
parser.add_argument("metric",
help="ID or name of the metric")
return parser
def take_action(self, parsed_args):
metric = utils.get_client(self).metric.get(
metric=parsed_args.metric,
resource_id=parsed_args.resource_id)
metric['archive_policy/name'] = metric["archive_policy"]["name"]
del metric['archive_policy']
del metric['created_by_user_id']
del metric['created_by_project_id']
utils.format_resource_for_metric(metric)
return self.dict2columns(metric)
class DeprecatedCliMetricShow(CliMetricShow):
"""Deprecated: Show a metric"""
def take_action(self, parsed_args):
LOG_DEP.warning('This command has been deprecated. '
'Please use "metric show" instead.')
return super(DeprecatedCliMetricShow, self).take_action(parsed_args)
class CliMetricCreateBase(show.ShowOne, CliMetricWithResourceID):
def get_parser(self, prog_name):
parser = super(CliMetricCreateBase, self).get_parser(prog_name)
parser.add_argument("--archive-policy-name", "-a",
dest="archive_policy_name",
help="name of the archive policy")
return parser
class CliMetricCreate(CliMetricCreateBase):
"""Create a metric"""
def get_parser(self, prog_name):
parser = super(CliMetricCreate, self).get_parser(prog_name)
parser.add_argument("name", nargs='?',
metavar="METRIC_NAME",
help="Name of the metric")
parser.add_argument("--unit", "-u",
help="unit of the metric")
return parser
def take_action(self, parsed_args):
metric = utils.get_client(self).metric._create_new(
archive_policy_name=parsed_args.archive_policy_name,
name=parsed_args.name,
resource_id=parsed_args.resource_id,
unit=parsed_args.unit,
)
utils.format_resource_for_metric(metric)
if 'archive_policy' in metric:
metric['archive_policy/name'] = metric["archive_policy"]["name"]
del metric['archive_policy']
del metric['created_by_user_id']
del metric['created_by_project_id']
return self.dict2columns(metric)
class DeprecatedCliMetricCreate(CliMetricCreate):
"""Deprecated: Create a metric"""
def take_action(self, parsed_args):
LOG_DEP.warning('This command has been deprecated. '
'Please use "metric create" instead.')
return super(DeprecatedCliMetricCreate, self).take_action(parsed_args)
class CliMetricDelete(CliMetricWithResourceID):
"""Delete a metric"""
def get_parser(self, prog_name):
parser = super(CliMetricDelete, self).get_parser(prog_name)
parser.add_argument("metric", nargs='+',
help="IDs or names of the metric")
return parser
def take_action(self, parsed_args):
for metric in parsed_args.metric:
utils.get_client(self).metric.delete(
metric=metric, resource_id=parsed_args.resource_id)
class DeprecatedCliMetricDelete(CliMetricDelete):
"""Deprecated: Delete a metric"""
def take_action(self, parsed_args):
LOG_DEP.warning('This command has been deprecated. '
'Please use "metric delete" instead.')
return super(DeprecatedCliMetricDelete, self).take_action(parsed_args)
class CliMeasuresReturn(lister.Lister):
def get_parser(self, prog_name):
parser = super(CliMeasuresReturn, self).get_parser(prog_name)
parser.add_argument("--utc", help="Return timestamps as UTC",
default=False,
action="store_true")
return parser
@staticmethod
def format_measures_with_tz(parsed_args, measures):
if parsed_args.utc:
t = lambda x: x
else:
t = utils.dt_to_localtz
return [(t(dt).isoformat(), g, v) for dt, g, v in measures]
class CliMeasuresShow(CliMetricWithResourceID, CliMeasuresReturn,
lister.Lister):
"""Get measurements of a metric"""
COLS = ('timestamp', 'granularity', 'value')
def get_parser(self, prog_name):
parser = super(CliMeasuresShow, self).get_parser(prog_name)
parser.add_argument("metric",
help="ID or name of the metric")
parser.add_argument("--aggregation",
help="aggregation to retrieve")
parser.add_argument("--start",
type=utils.parse_date,
help="beginning of the period")
parser.add_argument("--stop",
type=utils.parse_date,
help="end of the period")
parser.add_argument("--granularity",
help="granularity to retrieve")
parser.add_argument("--refresh", action="store_true",
help="force aggregation of all known measures")
parser.add_argument("--resample",
help=("granularity to resample time-series to "
"(in seconds)"))
return parser
def take_action(self, parsed_args):
measures = utils.get_client(self).metric.get_measures(
metric=parsed_args.metric,
resource_id=parsed_args.resource_id,
aggregation=parsed_args.aggregation,
start=parsed_args.start,
stop=parsed_args.stop,
granularity=parsed_args.granularity,
refresh=parsed_args.refresh,
resample=parsed_args.resample
)
return self.COLS, self.format_measures_with_tz(parsed_args, measures)
class CliMeasuresAddBase(CliMetricWithResourceID):
def get_parser(self, prog_name):
parser = super(CliMeasuresAddBase, self).get_parser(prog_name)
parser.add_argument("metric", help="ID or name of the metric")
return parser
class CliMeasuresAdd(CliMeasuresAddBase):
"""Add measurements to a metric"""
def measure(self, measure):
timestamp, __, value = measure.rpartition("@")
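        # e.g. (illustrative) "2017-05-02T13:00:00@42.5" yields the timestamp
        # "2017-05-02T13:00:00" and the value "42.5"; rpartition splits on the
        # last '@'.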
try:
timestamp = utils.parse_date(timestamp).isoformat()
except iso8601.iso8601.ParseError:
# NOTE(sileht): return string as-is and let the server decide
# if it's valid. (like +2hour, now, -5day)
pass
return {'timestamp': timestamp, 'value': float(value)}
def get_parser(self, prog_name):
parser = super(CliMeasuresAdd, self).get_parser(prog_name)
parser.add_argument("-m", "--measure", action='append',
required=True, type=self.measure,
help=("timestamp and value of a measure "
"separated with a '@'"))
return parser
def take_action(self, parsed_args):
utils.get_client(self).metric.add_measures(
metric=parsed_args.metric,
resource_id=parsed_args.resource_id,
measures=parsed_args.measure,
)
class CliMeasuresBatch(command.Command):
def stdin_or_file(self, value):
if value == "-":
return sys.stdin
else:
return open(value, 'r')
def get_parser(self, prog_name):
parser = super(CliMeasuresBatch, self).get_parser(prog_name)
parser.add_argument("file", type=self.stdin_or_file,
help=("File containing measurements to batch or "
"- for stdin (see Gnocchi REST API docs for "
"the format"))
return parser
class CliMetricsMeasuresBatch(CliMeasuresBatch):
def take_action(self, parsed_args):
with parsed_args.file as f:
utils.get_client(self).metric.batch_metrics_measures(json.load(f))
class CliResourcesMetricsMeasuresBatch(CliMeasuresBatch):
def get_parser(self, prog_name):
parser = super(CliResourcesMetricsMeasuresBatch, self).get_parser(
prog_name)
parser.add_argument("--create-metrics", action='store_true',
help="Create unknown metrics"),
return parser
def take_action(self, parsed_args):
with parsed_args.file as f:
utils.get_client(self).metric.batch_resources_metrics_measures(
json.load(f), create_metrics=parsed_args.create_metrics)
class CliMeasuresAggregation(CliMeasuresReturn):
"""Get measurements of aggregated metrics"""
COLS = ('timestamp', 'granularity', 'value')
def get_parser(self, prog_name):
parser = super(CliMeasuresAggregation, self).get_parser(prog_name)
parser.add_argument("-m", "--metric", nargs='+', required=True,
help="metrics IDs or metric name")
parser.add_argument("--aggregation", help="granularity aggregation "
"function to retrieve")
parser.add_argument("--reaggregation",
help="groupby aggregation function to retrieve")
parser.add_argument("--start",
type=utils.parse_date,
help="beginning of the period")
parser.add_argument("--stop",
type=utils.parse_date,
help="end of the period")
parser.add_argument("--granularity",
help="granularity to retrieve")
parser.add_argument("--needed-overlap", type=float,
help=("percent of datapoints in each "
"metrics required"))
utils.add_query_argument("--query", parser)
parser.add_argument("--resource-type", default="generic",
help="Resource type to query"),
parser.add_argument("--groupby",
action='append',
help="Attribute to use to group resources"),
parser.add_argument("--refresh", action="store_true",
help="force aggregation of all known measures")
parser.add_argument("--resample",
help=("granularity to resample time-series to "
"(in seconds)"))
parser.add_argument("--fill",
help=("Value to use when backfilling timestamps "
"with missing values in a subset of series. "
"Value should be a float or 'null'."))
return parser
def take_action(self, parsed_args):
metrics = parsed_args.metric
if parsed_args.query:
if len(parsed_args.metric) != 1:
raise ValueError("One metric is required if query is provided")
metrics = parsed_args.metric[0]
measures = utils.get_client(self).metric.aggregation(
metrics=metrics,
query=parsed_args.query,
aggregation=parsed_args.aggregation,
reaggregation=parsed_args.reaggregation,
start=parsed_args.start,
stop=parsed_args.stop,
granularity=parsed_args.granularity,
needed_overlap=parsed_args.needed_overlap,
resource_type=parsed_args.resource_type,
groupby=parsed_args.groupby,
refresh=parsed_args.refresh,
resample=parsed_args.resample, fill=parsed_args.fill
)
if parsed_args.groupby:
ms = []
for g in measures:
group_name = ", ".join("%s: %s" % (k, g['group'][k])
for k in sorted(g['group']))
for m in g['measures']:
i = [group_name]
i.extend(self.format_measures_with_tz(parsed_args, [m])[0])
ms.append(i)
return ('group',) + self.COLS, ms
return self.COLS, self.format_measures_with_tz(parsed_args, measures)
| apache-2.0 | 5,990,330,502,008,039,000 | 39.229947 | 79 | 0.581749 | false |
paulomagalhaes/spark-ec2 | spark_ec2.py | 1 | 61643 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division, print_function, with_statement
import codecs
import hashlib
import itertools
import logging
import os
import os.path
import pipes
import random
import shutil
import string
from stat import S_IRUSR
import subprocess
import sys
import tarfile
import tempfile
import textwrap
import time
import warnings
from datetime import datetime
from optparse import OptionParser
from sys import stderr
if sys.version < "3":
from urllib2 import urlopen, Request, HTTPError
else:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
raw_input = input
xrange = range
SPARK_EC2_VERSION = "1.6.0"
SPARK_EC2_DIR = os.path.dirname(os.path.realpath(__file__))
VALID_SPARK_VERSIONS = set([
"0.7.3",
"0.8.0",
"0.8.1",
"0.9.0",
"0.9.1",
"0.9.2",
"1.0.0",
"1.0.1",
"1.0.2",
"1.1.0",
"1.1.1",
"1.2.0",
"1.2.1",
"1.3.0",
"1.3.1",
"1.4.0",
"1.4.1",
"1.5.0",
"1.5.1",
"1.5.2",
"1.6.0",
])
SPARK_TACHYON_MAP = {
"1.0.0": "0.4.1",
"1.0.1": "0.4.1",
"1.0.2": "0.4.1",
"1.1.0": "0.5.0",
"1.1.1": "0.5.0",
"1.2.0": "0.5.0",
"1.2.1": "0.5.0",
"1.3.0": "0.5.0",
"1.3.1": "0.5.0",
"1.4.0": "0.6.4",
"1.4.1": "0.6.4",
"1.5.0": "0.7.1",
"1.5.1": "0.7.1",
"1.5.2": "0.7.1",
"1.6.0": "0.8.2",
}
DEFAULT_SPARK_VERSION = SPARK_EC2_VERSION
DEFAULT_SPARK_GITHUB_REPO = "https://github.com/apache/spark"
# Default location to get the spark-ec2 scripts (and ami-list) from
DEFAULT_SPARK_EC2_GITHUB_REPO = "https://github.com/amplab/spark-ec2"
DEFAULT_SPARK_EC2_BRANCH = "branch-1.5"
def setup_external_libs(libs):
"""
Download external libraries from PyPI to SPARK_EC2_DIR/lib/ and prepend them to our PATH.
"""
PYPI_URL_PREFIX = "https://pypi.python.org/packages/source"
SPARK_EC2_LIB_DIR = os.path.join(SPARK_EC2_DIR, "lib")
if not os.path.exists(SPARK_EC2_LIB_DIR):
print("Downloading external libraries that spark-ec2 needs from PyPI to {path}...".format(
path=SPARK_EC2_LIB_DIR
))
print("This should be a one-time operation.")
os.mkdir(SPARK_EC2_LIB_DIR)
for lib in libs:
versioned_lib_name = "{n}-{v}".format(n=lib["name"], v=lib["version"])
lib_dir = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name)
if not os.path.isdir(lib_dir):
tgz_file_path = os.path.join(SPARK_EC2_LIB_DIR, versioned_lib_name + ".tar.gz")
print(" - Downloading {lib}...".format(lib=lib["name"]))
download_stream = urlopen(
"{prefix}/{first_letter}/{lib_name}/{lib_name}-{lib_version}.tar.gz".format(
prefix=PYPI_URL_PREFIX,
first_letter=lib["name"][:1],
lib_name=lib["name"],
lib_version=lib["version"]
)
)
with open(tgz_file_path, "wb") as tgz_file:
tgz_file.write(download_stream.read())
with open(tgz_file_path, "rb") as tar:
if hashlib.md5(tar.read()).hexdigest() != lib["md5"]:
print("ERROR: Got wrong md5sum for {lib}.".format(lib=lib["name"]), file=stderr)
sys.exit(1)
tar = tarfile.open(tgz_file_path)
tar.extractall(path=SPARK_EC2_LIB_DIR)
tar.close()
os.remove(tgz_file_path)
print(" - Finished downloading {lib}.".format(lib=lib["name"]))
sys.path.insert(1, lib_dir)
# Only PyPI libraries are supported.
external_libs = [
{
"name": "boto",
"version": "2.34.0",
"md5": "5556223d2d0cc4d06dd4829e671dcecd"
}
]
setup_external_libs(external_libs)
import boto
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType, EBSBlockDeviceType
from boto import ec2
class UsageError(Exception):
pass
# Configure and parse our command-line arguments
def parse_args():
parser = OptionParser(
prog="spark-ec2",
version="%prog {v}".format(v=SPARK_EC2_VERSION),
usage="%prog [options] <action> <cluster_name>\n\n"
+ "<action> can be: launch, destroy, login, stop, start, get-master, reboot-slaves")
parser.add_option(
"-s", "--slaves", type="int", default=1,
help="Number of slaves to launch (default: %default)")
parser.add_option(
"-w", "--wait", type="int",
help="DEPRECATED (no longer necessary) - Seconds to wait for nodes to start")
parser.add_option(
"-k", "--key-pair",
help="Key pair to use on instances")
parser.add_option(
"-i", "--identity-file",
help="SSH private key file to use for logging into instances")
parser.add_option(
"-p", "--profile", default=None,
help="If you have multiple profiles (AWS or boto config), you can configure " +
"additional, named profiles by using this option (default: %default)")
parser.add_option(
"-t", "--instance-type", default="m1.large",
help="Type of instance to launch (default: %default). " +
"WARNING: must be 64-bit; small instances won't work")
parser.add_option(
"-m", "--master-instance-type", default="",
help="Master instance type (leave empty for same as instance-type)")
parser.add_option(
"-r", "--region", default="us-east-1",
help="EC2 region used to launch instances in, or to find them in (default: %default)")
parser.add_option(
"-z", "--zone", default="",
help="Availability zone to launch instances in, or 'all' to spread " +
"slaves across multiple (an additional $0.01/Gb for bandwidth" +
"between zones applies) (default: a single zone chosen at random)")
parser.add_option(
"-a", "--ami",
help="Amazon Machine Image ID to use")
parser.add_option(
"-v", "--spark-version", default=DEFAULT_SPARK_VERSION,
help="Version of Spark to use: 'X.Y.Z' or a specific git hash (default: %default)")
parser.add_option(
"--spark-git-repo",
default=DEFAULT_SPARK_GITHUB_REPO,
help="Github repo from which to checkout supplied commit hash (default: %default)")
parser.add_option(
"--spark-ec2-git-repo",
default=DEFAULT_SPARK_EC2_GITHUB_REPO,
help="Github repo from which to checkout spark-ec2 (default: %default)")
parser.add_option(
"--spark-ec2-git-branch",
default=DEFAULT_SPARK_EC2_BRANCH,
help="Github repo branch of spark-ec2 to use (default: %default)")
parser.add_option(
"--deploy-root-dir",
default=None,
help="A directory to copy into / on the first master. " +
"Must be absolute. Note that a trailing slash is handled as per rsync: " +
"If you omit it, the last directory of the --deploy-root-dir path will be created " +
"in / before copying its contents. If you append the trailing slash, " +
"the directory is not created and its contents are copied directly into /. " +
"(default: %default).")
parser.add_option(
"--hadoop-major-version", default="1",
help="Major version of Hadoop. Valid options are 1 (Hadoop 1.0.4), 2 (CDH 4.2.0), yarn " +
"(Hadoop 2.4.0) (default: %default)")
parser.add_option(
"-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
"the given local address (for use with login)")
parser.add_option(
"--resume", action="store_true", default=False,
help="Resume installation on a previously launched cluster " +
"(for debugging)")
parser.add_option(
"--ebs-vol-size", metavar="SIZE", type="int", default=0,
help="Size (in GB) of each EBS volume.")
parser.add_option(
"--ebs-vol-type", default="standard",
help="EBS volume type (e.g. 'gp2', 'standard').")
parser.add_option(
"--ebs-vol-num", type="int", default=1,
help="Number of EBS volumes to attach to each node as /vol[x]. " +
"The volumes will be deleted when the instances terminate. " +
"Only possible on EBS-backed AMIs. " +
"EBS volumes are only attached if --ebs-vol-size > 0. " +
"Only support up to 8 EBS volumes.")
parser.add_option(
"--placement-group", type="string", default=None,
help="Which placement group to try and launch " +
"instances into. Assumes placement group is already " +
"created.")
parser.add_option(
"--swap", metavar="SWAP", type="int", default=1024,
help="Swap space to set up per node, in MB (default: %default)")
parser.add_option(
"--spot-price", metavar="PRICE", type="float",
help="If specified, launch slaves as spot instances with the given " +
"maximum price (in dollars)")
parser.add_option(
"--ganglia", action="store_true", default=True,
help="Setup Ganglia monitoring on cluster (default: %default). NOTE: " +
"the Ganglia page will be publicly accessible")
parser.add_option(
"--no-ganglia", action="store_false", dest="ganglia",
help="Disable Ganglia monitoring for the cluster")
parser.add_option(
"-u", "--user", default="root",
help="The SSH user you want to connect as (default: %default)")
parser.add_option(
"--delete-groups", action="store_true", default=False,
help="When destroying a cluster, delete the security groups that were created")
parser.add_option(
"--use-existing-master", action="store_true", default=False,
help="Launch fresh slaves, but use an existing stopped master if possible")
parser.add_option(
"--worker-instances", type="int", default=1,
help="Number of instances per worker: variable SPARK_WORKER_INSTANCES. Not used if YARN " +
"is used as Hadoop major version (default: %default)")
parser.add_option(
"--master-opts", type="string", default="",
help="Extra options to give to master through SPARK_MASTER_OPTS variable " +
"(e.g -Dspark.worker.timeout=180)")
parser.add_option(
"--user-data", type="string", default="",
help="Path to a user-data file (most AMIs interpret this as an initialization script)")
parser.add_option(
"--authorized-address", type="string", default="0.0.0.0/0",
help="Address to authorize on created security groups (default: %default)")
parser.add_option(
"--additional-security-group", type="string", default="",
help="Additional security group to place the machines in")
parser.add_option(
"--additional-tags", type="string", default="",
help="Additional tags to set on the machines; tags are comma-separated, while name and " +
"value are colon separated; ex: \"Task:MySparkProject,Env:production\"")
parser.add_option(
"--copy-aws-credentials", action="store_true", default=False,
help="Add AWS credentials to hadoop configuration to allow Spark to access S3")
parser.add_option(
"--subnet-id", default=None,
help="VPC subnet to launch instances in")
parser.add_option(
"--vpc-id", default=None,
help="VPC to launch instances in")
parser.add_option(
"--private-ips", action="store_true", default=False,
help="Use private IPs for instances rather than public if VPC/subnet " +
"requires that.")
parser.add_option(
"--instance-initiated-shutdown-behavior", default="stop",
choices=["stop", "terminate"],
help="Whether instances should terminate when shut down or just stop")
parser.add_option(
"--instance-profile-name", default=None,
help="IAM profile name to launch instances under")
(opts, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
(action, cluster_name) = args
# Boto config check
# http://boto.cloudhackers.com/en/latest/boto_config_tut.html
home_dir = os.getenv('HOME')
if home_dir is None or not os.path.isfile(home_dir + '/.boto'):
if not os.path.isfile('/etc/boto.cfg'):
# If there is no boto config, check aws credentials
            if home_dir is None or not os.path.isfile(home_dir + '/.aws/credentials'):
if os.getenv('AWS_ACCESS_KEY_ID') is None:
print("ERROR: The environment variable AWS_ACCESS_KEY_ID must be set",
file=stderr)
sys.exit(1)
if os.getenv('AWS_SECRET_ACCESS_KEY') is None:
print("ERROR: The environment variable AWS_SECRET_ACCESS_KEY must be set",
file=stderr)
sys.exit(1)
return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name, vpc_id):
groups = conn.get_all_security_groups()
group = [g for g in groups if g.name == name]
if len(group) > 0:
return group[0]
else:
print("Creating security group " + name)
return conn.create_security_group(name, "Spark EC2 group", vpc_id)
def get_validate_spark_version(version, repo):
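    # A version string containing a dot is treated as a released Spark version and
    # checked against VALID_SPARK_VERSIONS; anything else is assumed to be a git
    # commit hash and is validated by probing the commit URL on the given repo.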
if "." in version:
version = version.replace("v", "")
if version not in VALID_SPARK_VERSIONS:
print("Don't know about Spark version: {v}".format(v=version), file=stderr)
sys.exit(1)
return version
else:
github_commit_url = "{repo}/commit/{commit_hash}".format(repo=repo, commit_hash=version)
request = Request(github_commit_url)
request.get_method = lambda: 'HEAD'
try:
response = urlopen(request)
except HTTPError as e:
print("Couldn't validate Spark commit: {url}".format(url=github_commit_url),
file=stderr)
print("Received HTTP response code of {code}.".format(code=e.code), file=stderr)
sys.exit(1)
return version
# Source: http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
# Last Updated: 2015-06-19
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
EC2_INSTANCE_TYPES = {
"c1.medium": "pvm",
"c1.xlarge": "pvm",
"c3.large": "pvm",
"c3.xlarge": "pvm",
"c3.2xlarge": "pvm",
"c3.4xlarge": "pvm",
"c3.8xlarge": "pvm",
"c4.large": "hvm",
"c4.xlarge": "hvm",
"c4.2xlarge": "hvm",
"c4.4xlarge": "hvm",
"c4.8xlarge": "hvm",
"cc1.4xlarge": "hvm",
"cc2.8xlarge": "hvm",
"cg1.4xlarge": "hvm",
"cr1.8xlarge": "hvm",
"d2.xlarge": "hvm",
"d2.2xlarge": "hvm",
"d2.4xlarge": "hvm",
"d2.8xlarge": "hvm",
"g2.2xlarge": "hvm",
"g2.8xlarge": "hvm",
"hi1.4xlarge": "pvm",
"hs1.8xlarge": "pvm",
"i2.xlarge": "hvm",
"i2.2xlarge": "hvm",
"i2.4xlarge": "hvm",
"i2.8xlarge": "hvm",
"m1.small": "pvm",
"m1.medium": "pvm",
"m1.large": "pvm",
"m1.xlarge": "pvm",
"m2.xlarge": "pvm",
"m2.2xlarge": "pvm",
"m2.4xlarge": "pvm",
"m3.medium": "hvm",
"m3.large": "hvm",
"m3.xlarge": "hvm",
"m3.2xlarge": "hvm",
"m4.large": "hvm",
"m4.xlarge": "hvm",
"m4.2xlarge": "hvm",
"m4.4xlarge": "hvm",
"m4.10xlarge": "hvm",
"r3.large": "hvm",
"r3.xlarge": "hvm",
"r3.2xlarge": "hvm",
"r3.4xlarge": "hvm",
"r3.8xlarge": "hvm",
"t1.micro": "pvm",
"t2.micro": "hvm",
"t2.small": "hvm",
"t2.medium": "hvm",
"t2.large": "hvm",
}
def get_tachyon_version(spark_version):
return SPARK_TACHYON_MAP.get(spark_version, "")
# Attempt to resolve an appropriate AMI given the architecture and region of the request.
def get_spark_ami(opts):
if opts.instance_type in EC2_INSTANCE_TYPES:
instance_type = EC2_INSTANCE_TYPES[opts.instance_type]
else:
instance_type = "pvm"
print("Don't recognize %s, assuming type is pvm" % opts.instance_type, file=stderr)
# URL prefix from which to fetch AMI information
ami_prefix = "{r}/{b}/ami-list".format(
r=opts.spark_ec2_git_repo.replace("https://github.com", "https://raw.github.com", 1),
b=opts.spark_ec2_git_branch)
ami_path = "%s/%s/%s" % (ami_prefix, opts.region, instance_type)
reader = codecs.getreader("ascii")
try:
ami = reader(urlopen(ami_path)).read().strip()
except:
print("Could not resolve AMI at: " + ami_path, file=stderr)
sys.exit(1)
print("Spark AMI: " + ami)
return ami
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master and slaves
# Fails if there are already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
if opts.identity_file is None:
print("ERROR: Must provide an identity file (-i) for ssh connections.", file=stderr)
sys.exit(1)
if opts.key_pair is None:
print("ERROR: Must provide a key pair name (-k) to use on instances.", file=stderr)
sys.exit(1)
user_data_content = None
if opts.user_data:
with open(opts.user_data) as user_data_file:
user_data_content = user_data_file.read()
print("Setting up security groups...")
master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
authorized_address = opts.authorized_address
if master_group.rules == []: # Group was just now created
if opts.vpc_id is None:
master_group.authorize(src_group=master_group)
master_group.authorize(src_group=slave_group)
else:
master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=master_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=master_group)
master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=master_group)
master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=slave_group)
master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=slave_group)
master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=slave_group)
master_group.authorize('tcp', 22, 22, authorized_address)
master_group.authorize('tcp', 8080, 8081, authorized_address)
master_group.authorize('tcp', 18080, 18080, authorized_address)
master_group.authorize('tcp', 19999, 19999, authorized_address)
master_group.authorize('tcp', 50030, 50030, authorized_address)
master_group.authorize('tcp', 50070, 50070, authorized_address)
master_group.authorize('tcp', 60070, 60070, authorized_address)
master_group.authorize('tcp', 4040, 4045, authorized_address)
# Rstudio (GUI for R) needs port 8787 for web access
master_group.authorize('tcp', 8787, 8787, authorized_address)
# HDFS NFS gateway requires 111,2049,4242 for tcp & udp
master_group.authorize('tcp', 111, 111, authorized_address)
master_group.authorize('udp', 111, 111, authorized_address)
master_group.authorize('tcp', 2049, 2049, authorized_address)
master_group.authorize('udp', 2049, 2049, authorized_address)
master_group.authorize('tcp', 4242, 4242, authorized_address)
master_group.authorize('udp', 4242, 4242, authorized_address)
# RM in YARN mode uses 8088
master_group.authorize('tcp', 8088, 8088, authorized_address)
if opts.ganglia:
master_group.authorize('tcp', 5080, 5080, authorized_address)
if slave_group.rules == []: # Group was just now created
if opts.vpc_id is None:
slave_group.authorize(src_group=master_group)
slave_group.authorize(src_group=slave_group)
else:
slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=master_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=master_group)
slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=master_group)
slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
src_group=slave_group)
slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
src_group=slave_group)
slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
src_group=slave_group)
slave_group.authorize('tcp', 22, 22, authorized_address)
slave_group.authorize('tcp', 8080, 8081, authorized_address)
slave_group.authorize('tcp', 50060, 50060, authorized_address)
slave_group.authorize('tcp', 50075, 50075, authorized_address)
slave_group.authorize('tcp', 60060, 60060, authorized_address)
slave_group.authorize('tcp', 60075, 60075, authorized_address)
# Check if instances are already running in our groups
existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
die_on_error=False)
if existing_slaves or (existing_masters and not opts.use_existing_master):
print("ERROR: There are already instances running in group %s or %s" %
(master_group.name, slave_group.name), file=stderr)
sys.exit(1)
# Figure out Spark AMI
if opts.ami is None:
opts.ami = get_spark_ami(opts)
# we use group ids to work around https://github.com/boto/boto/issues/350
additional_group_ids = []
if opts.additional_security_group:
additional_group_ids = [sg.id
for sg in conn.get_all_security_groups()
if opts.additional_security_group in (sg.name, sg.id)]
print("Launching instances...")
try:
image = conn.get_all_images(image_ids=[opts.ami])[0]
except:
print("Could not find AMI " + opts.ami, file=stderr)
sys.exit(1)
# Create block device mapping so that we can add EBS volumes if asked to.
# The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
block_map = BlockDeviceMapping()
if opts.ebs_vol_size > 0:
for i in range(opts.ebs_vol_num):
device = EBSBlockDeviceType()
device.size = opts.ebs_vol_size
device.volume_type = opts.ebs_vol_type
device.delete_on_termination = True
block_map["/dev/sd" + chr(ord('s') + i)] = device
# AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
if opts.instance_type.startswith('m3.'):
for i in range(get_num_disks(opts.instance_type)):
dev = BlockDeviceType()
dev.ephemeral_name = 'ephemeral%d' % i
# The first ephemeral drive is /dev/sdb.
name = '/dev/sd' + string.ascii_letters[i + 1]
block_map[name] = dev
# Launch slaves
if opts.spot_price is not None:
# Launch spot instances with the requested price
print("Requesting %d slaves as spot instances with price $%.3f" %
(opts.slaves, opts.spot_price))
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
my_req_ids = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
slave_reqs = conn.request_spot_instances(
price=opts.spot_price,
image_id=opts.ami,
launch_group="launch-group-%s" % cluster_name,
placement=zone,
count=num_slaves_this_zone,
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_profile_name=opts.instance_profile_name)
my_req_ids += [req.id for req in slave_reqs]
i += 1
print("Waiting for spot instances to be granted...")
try:
while True:
time.sleep(10)
reqs = conn.get_all_spot_instance_requests()
id_to_req = {}
for r in reqs:
id_to_req[r.id] = r
active_instance_ids = []
for i in my_req_ids:
if i in id_to_req and id_to_req[i].state == "active":
active_instance_ids.append(id_to_req[i].instance_id)
if len(active_instance_ids) == opts.slaves:
print("All %d slaves granted" % opts.slaves)
reservations = conn.get_all_reservations(active_instance_ids)
slave_nodes = []
for r in reservations:
slave_nodes += r.instances
break
else:
print("%d of %d slaves granted, waiting longer" % (
len(active_instance_ids), opts.slaves))
except:
print("Canceling spot instance requests")
conn.cancel_spot_instance_requests(my_req_ids)
# Log a warning if any of these requests actually launched instances:
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
running = len(master_nodes) + len(slave_nodes)
if running:
print(("WARNING: %d instances are still running" % running), file=stderr)
sys.exit(0)
else:
# Launch non-spot instances
zones = get_zones(conn, opts)
num_zones = len(zones)
i = 0
slave_nodes = []
for zone in zones:
num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
if num_slaves_this_zone > 0:
slave_res = image.run(
key_name=opts.key_pair,
security_group_ids=[slave_group.id] + additional_group_ids,
instance_type=opts.instance_type,
placement=zone,
min_count=num_slaves_this_zone,
max_count=num_slaves_this_zone,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
instance_profile_name=opts.instance_profile_name)
slave_nodes += slave_res.instances
print("Launched {s} slave{plural_s} in {z}, regid = {r}".format(
s=num_slaves_this_zone,
plural_s=('' if num_slaves_this_zone == 1 else 's'),
z=zone,
r=slave_res.id))
i += 1
# Launch or resume masters
if existing_masters:
print("Starting master...")
for inst in existing_masters:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
master_nodes = existing_masters
else:
master_type = opts.master_instance_type
if master_type == "":
master_type = opts.instance_type
if opts.zone == 'all':
opts.zone = random.choice(conn.get_all_zones()).name
master_res = image.run(
key_name=opts.key_pair,
security_group_ids=[master_group.id] + additional_group_ids,
instance_type=master_type,
placement=opts.zone,
min_count=1,
max_count=1,
block_device_map=block_map,
subnet_id=opts.subnet_id,
placement_group=opts.placement_group,
user_data=user_data_content,
instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
instance_profile_name=opts.instance_profile_name)
master_nodes = master_res.instances
print("Launched master in %s, regid = %s" % (zone, master_res.id))
# This wait time corresponds to SPARK-4983
print("Waiting for AWS to propagate instance metadata...")
time.sleep(15)
# Give the instances descriptive names and set additional tags
additional_tags = {}
if opts.additional_tags.strip():
additional_tags = dict(
map(str.strip, tag.split(':', 1)) for tag in opts.additional_tags.split(',')
)
for master in master_nodes:
master.add_tags(
dict(additional_tags, Name='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
)
for slave in slave_nodes:
slave.add_tags(
dict(additional_tags, Name='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
)
# Return all the instances
return (master_nodes, slave_nodes)
def get_existing_cluster(conn, opts, cluster_name, die_on_error=True):
"""
Get the EC2 instances in an existing cluster if available.
Returns a tuple of lists of EC2 instance objects for the masters and slaves.
"""
print("Searching for existing cluster {c} in region {r}...".format(
c=cluster_name, r=opts.region))
def get_instances(group_names):
"""
Get all non-terminated instances that belong to any of the provided security groups.
EC2 reservation filters and instance states are documented here:
http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options
"""
reservations = conn.get_all_reservations(
filters={"instance.group-name": group_names})
instances = itertools.chain.from_iterable(r.instances for r in reservations)
return [i for i in instances if i.state not in ["shutting-down", "terminated"]]
master_instances = get_instances([cluster_name + "-master"])
slave_instances = get_instances([cluster_name + "-slaves"])
if any((master_instances, slave_instances)):
print("Found {m} master{plural_m}, {s} slave{plural_s}.".format(
m=len(master_instances),
plural_m=('' if len(master_instances) == 1 else 's'),
s=len(slave_instances),
plural_s=('' if len(slave_instances) == 1 else 's')))
if not master_instances and die_on_error:
print("ERROR: Could not find a master for cluster {c} in region {r}.".format(
c=cluster_name, r=opts.region), file=sys.stderr)
sys.exit(1)
return (master_instances, slave_instances)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster.
def setup_cluster(conn, master_nodes, slave_nodes, opts, deploy_ssh_key):
master = get_dns_name(master_nodes[0], opts.private_ips)
if deploy_ssh_key:
print("Generating cluster's SSH key on master...")
key_setup = """
[ -f ~/.ssh/id_rsa ] ||
(ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys)
"""
ssh(master, opts, key_setup)
dot_ssh_tar = ssh_read(master, opts, ['tar', 'c', '.ssh'])
print("Transferring cluster's SSH key to slaves...")
for slave in slave_nodes:
slave_address = get_dns_name(slave, opts.private_ips)
print(slave_address)
ssh_write(slave_address, opts, ['tar', 'x'], dot_ssh_tar)
modules = ['spark', 'ephemeral-hdfs', 'persistent-hdfs',
'mapreduce', 'spark-standalone', 'tachyon', 'rstudio']
if opts.hadoop_major_version == "1":
modules = list(filter(lambda x: x != "mapreduce", modules))
if opts.ganglia:
modules.append('ganglia')
# Clear SPARK_WORKER_INSTANCES if running on YARN
if opts.hadoop_major_version == "yarn":
opts.worker_instances = ""
# NOTE: We should clone the repository before running deploy_files to
# prevent ec2-variables.sh from being overwritten
print("Cloning spark-ec2 scripts from {r}/tree/{b} on master...".format(
r=opts.spark_ec2_git_repo, b=opts.spark_ec2_git_branch))
ssh(
host=master,
opts=opts,
command="rm -rf spark-ec2"
+ " && "
+ "git clone {r} -b {b} spark-ec2".format(r=opts.spark_ec2_git_repo,
b=opts.spark_ec2_git_branch)
)
print("Deploying files to master...")
deploy_files(
conn=conn,
root_dir=SPARK_EC2_DIR + "/" + "deploy.generic",
opts=opts,
master_nodes=master_nodes,
slave_nodes=slave_nodes,
modules=modules
)
if opts.deploy_root_dir is not None:
print("Deploying {s} to master...".format(s=opts.deploy_root_dir))
deploy_user_files(
root_dir=opts.deploy_root_dir,
opts=opts,
master_nodes=master_nodes
)
print("Running setup on master...")
setup_spark_cluster(master, opts)
print("Done!")
def setup_spark_cluster(master, opts):
ssh(master, opts, "chmod u+x spark-ec2/setup.sh")
ssh(master, opts, "spark-ec2/setup.sh")
print("Spark standalone cluster started at http://%s:8080" % master)
if opts.ganglia:
print("Ganglia started at http://%s:5080/ganglia" % master)
def is_ssh_available(host, opts, print_ssh_output=True):
"""
Check if SSH is available on a host.
"""
s = subprocess.Popen(
ssh_command(opts) + ['-t', '-t', '-o', 'ConnectTimeout=3',
'%s@%s' % (opts.user, host), stringify_command('true')],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT # we pipe stderr through stdout to preserve output order
)
cmd_output = s.communicate()[0] # [1] is stderr, which we redirected to stdout
if s.returncode != 0 and print_ssh_output:
# extra leading newline is for spacing in wait_for_cluster_state()
print(textwrap.dedent("""\n
Warning: SSH connection error. (This could be temporary.)
Host: {h}
SSH return code: {r}
SSH output: {o}
""").format(
h=host,
r=s.returncode,
o=cmd_output.strip()
))
return s.returncode == 0
def is_cluster_ssh_available(cluster_instances, opts):
"""
Check if SSH is available on all the instances in a cluster.
"""
for i in cluster_instances:
dns_name = get_dns_name(i, opts.private_ips)
if not is_ssh_available(host=dns_name, opts=opts):
return False
else:
return True
def wait_for_cluster_state(conn, opts, cluster_instances, cluster_state):
"""
Wait for all the instances in the cluster to reach a designated state.
cluster_instances: a list of boto.ec2.instance.Instance
cluster_state: a string representing the desired state of all the instances in the cluster
value can be 'ssh-ready' or a valid value from boto.ec2.instance.InstanceState such as
'running', 'terminated', etc.
(would be nice to replace this with a proper enum: http://stackoverflow.com/a/1695250)
"""
sys.stdout.write(
"Waiting for cluster to enter '{s}' state.".format(s=cluster_state)
)
sys.stdout.flush()
start_time = datetime.now()
num_attempts = 0
while True:
time.sleep(5 * num_attempts) # seconds
for i in cluster_instances:
i.update()
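        # Query instance statuses in batches of at most 100 IDs per EC2 API call.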
max_batch = 100
statuses = []
for j in xrange(0, len(cluster_instances), max_batch):
batch = [i.id for i in cluster_instances[j:j + max_batch]]
statuses.extend(conn.get_all_instance_status(instance_ids=batch))
if cluster_state == 'ssh-ready':
if all(i.state == 'running' for i in cluster_instances) and \
all(s.system_status.status == 'ok' for s in statuses) and \
all(s.instance_status.status == 'ok' for s in statuses) and \
is_cluster_ssh_available(cluster_instances, opts):
break
else:
if all(i.state == cluster_state for i in cluster_instances):
break
num_attempts += 1
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("\n")
end_time = datetime.now()
print("Cluster is now in '{s}' state. Waited {t} seconds.".format(
s=cluster_state,
t=(end_time - start_time).seconds
))
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
# Source: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html
# Last Updated: 2015-06-19
# For easy maintainability, please keep this manually-inputted dictionary sorted by key.
disks_by_instance = {
"c1.medium": 1,
"c1.xlarge": 4,
"c3.large": 2,
"c3.xlarge": 2,
"c3.2xlarge": 2,
"c3.4xlarge": 2,
"c3.8xlarge": 2,
"c4.large": 0,
"c4.xlarge": 0,
"c4.2xlarge": 0,
"c4.4xlarge": 0,
"c4.8xlarge": 0,
"cc1.4xlarge": 2,
"cc2.8xlarge": 4,
"cg1.4xlarge": 2,
"cr1.8xlarge": 2,
"d2.xlarge": 3,
"d2.2xlarge": 6,
"d2.4xlarge": 12,
"d2.8xlarge": 24,
"g2.2xlarge": 1,
"g2.8xlarge": 2,
"hi1.4xlarge": 2,
"hs1.8xlarge": 24,
"i2.xlarge": 1,
"i2.2xlarge": 2,
"i2.4xlarge": 4,
"i2.8xlarge": 8,
"m1.small": 1,
"m1.medium": 1,
"m1.large": 2,
"m1.xlarge": 4,
"m2.xlarge": 1,
"m2.2xlarge": 1,
"m2.4xlarge": 2,
"m3.medium": 1,
"m3.large": 1,
"m3.xlarge": 2,
"m3.2xlarge": 2,
"m4.large": 0,
"m4.xlarge": 0,
"m4.2xlarge": 0,
"m4.4xlarge": 0,
"m4.10xlarge": 0,
"r3.large": 1,
"r3.xlarge": 1,
"r3.2xlarge": 1,
"r3.4xlarge": 1,
"r3.8xlarge": 2,
"t1.micro": 0,
"t2.micro": 0,
"t2.small": 0,
"t2.medium": 0,
"t2.large": 0,
}
if instance_type in disks_by_instance:
return disks_by_instance[instance_type]
else:
print("WARNING: Don't know number of disks on instance type %s; assuming 1"
% instance_type, file=stderr)
return 1
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
#
# root_dir should be an absolute path to the directory with the files we want to deploy.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, modules):
active_master = get_dns_name(master_nodes[0], opts.private_ips)
num_disks = get_num_disks(opts.instance_type)
hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
mapred_local_dirs = "/mnt/hadoop/mrlocal"
spark_local_dirs = "/mnt/spark"
if num_disks > 1:
for i in range(2, num_disks + 1):
hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
spark_local_dirs += ",/mnt%d/spark" % i
cluster_url = "%s:7077" % active_master
if "." in opts.spark_version:
# Pre-built Spark deploy
spark_v = get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
tachyon_v = get_tachyon_version(spark_v)
else:
# Spark-only custom deploy
spark_v = "%s|%s" % (opts.spark_git_repo, opts.spark_version)
tachyon_v = ""
print("Deploying Spark via git hash; Tachyon won't be set up")
modules = filter(lambda x: x != "tachyon", modules)
master_addresses = [get_dns_name(i, opts.private_ips) for i in master_nodes]
slave_addresses = [get_dns_name(i, opts.private_ips) for i in slave_nodes]
worker_instances_str = "%d" % opts.worker_instances if opts.worker_instances else ""
template_vars = {
"master_list": '\n'.join(master_addresses),
"active_master": active_master,
"slave_list": '\n'.join(slave_addresses),
"cluster_url": cluster_url,
"hdfs_data_dirs": hdfs_data_dirs,
"mapred_local_dirs": mapred_local_dirs,
"spark_local_dirs": spark_local_dirs,
"swap": str(opts.swap),
"modules": '\n'.join(modules),
"spark_version": spark_v,
"tachyon_version": tachyon_v,
"hadoop_major_version": opts.hadoop_major_version,
"spark_worker_instances": worker_instances_str,
"spark_master_opts": opts.master_opts
}
if opts.copy_aws_credentials:
template_vars["aws_access_key_id"] = conn.aws_access_key_id
template_vars["aws_secret_access_key"] = conn.aws_secret_access_key
else:
template_vars["aws_access_key_id"] = ""
template_vars["aws_secret_access_key"] = ""
# Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
tmp_dir = tempfile.mkdtemp()
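    # Walk the template tree, skipping .svn directories and editor temp/backup files,
    # and write copies into tmp_dir with each {{variable}} replaced by its value.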
for path, dirs, files in os.walk(root_dir):
if path.find(".svn") == -1:
dest_dir = os.path.join('/', path[len(root_dir):])
local_dir = tmp_dir + dest_dir
if not os.path.exists(local_dir):
os.makedirs(local_dir)
for filename in files:
if filename[0] not in '#.~' and filename[-1] != '~':
dest_file = os.path.join(dest_dir, filename)
local_file = tmp_dir + dest_file
with open(os.path.join(path, filename)) as src:
with open(local_file, "w") as dest:
text = src.read()
for key in template_vars:
text = text.replace("{{" + key + "}}", template_vars[key])
dest.write(text)
dest.close()
# rsync the whole directory over to the master machine
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s/" % tmp_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
# Remove the temp directory we created above
shutil.rmtree(tmp_dir)
# Deploy a given local directory to a cluster, WITHOUT parameter substitution.
# Note that unlike deploy_files, this works for binary files.
# Also, it is up to the user to add (or not) the trailing slash in root_dir.
# Files are only deployed to the first master instance in the cluster.
#
# root_dir should be an absolute path.
def deploy_user_files(root_dir, opts, master_nodes):
active_master = get_dns_name(master_nodes[0], opts.private_ips)
command = [
'rsync', '-rv',
'-e', stringify_command(ssh_command(opts)),
"%s" % root_dir,
"%s@%s:/" % (opts.user, active_master)
]
subprocess.check_call(command)
def stringify_command(parts):
if isinstance(parts, str):
return parts
else:
return ' '.join(map(pipes.quote, parts))
def ssh_args(opts):
parts = ['-o', 'StrictHostKeyChecking=no']
parts += ['-o', 'UserKnownHostsFile=/dev/null']
if opts.identity_file is not None:
parts += ['-i', opts.identity_file]
return parts
def ssh_command(opts):
return ['ssh'] + ssh_args(opts)
# Run a command on a host through ssh, retrying up to five times
# and then throwing an exception if ssh continues to fail.
def ssh(host, opts, command):
tries = 0
while True:
try:
return subprocess.check_call(
ssh_command(opts) + ['-t', '-t', '%s@%s' % (opts.user, host),
stringify_command(command)])
except subprocess.CalledProcessError as e:
if tries > 5:
# If this was an ssh failure, provide the user with hints.
if e.returncode == 255:
raise UsageError(
"Failed to SSH to remote host {0}.\n"
"Please check that you have provided the correct --identity-file and "
"--key-pair parameters and try again.".format(host))
else:
raise e
print("Error executing remote command, retrying after 30 seconds: {0}".format(e),
file=stderr)
time.sleep(30)
tries = tries + 1
# Backported from Python 2.7 for compatibility with 2.6 (See SPARK-1990)
def _check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def ssh_read(host, opts, command):
return _check_output(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)])
def ssh_write(host, opts, command, arguments):
tries = 0
while True:
proc = subprocess.Popen(
ssh_command(opts) + ['%s@%s' % (opts.user, host), stringify_command(command)],
stdin=subprocess.PIPE)
proc.stdin.write(arguments)
proc.stdin.close()
status = proc.wait()
if status == 0:
break
elif tries > 5:
raise RuntimeError("ssh_write failed with error %s" % proc.returncode)
else:
print("Error {0} while executing remote command, retrying after 30 seconds".
format(status), file=stderr)
time.sleep(30)
tries = tries + 1
# Gets a list of zones to launch instances in
def get_zones(conn, opts):
if opts.zone == 'all':
zones = [z.name for z in conn.get_all_zones()]
else:
zones = [opts.zone]
return zones
# Gets the number of items in a partition
def get_partition(total, num_partitions, current_partitions):
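    # Distribute `total` items as evenly as possible across `num_partitions` zones:
    # the first (total % num_partitions) zones receive one extra item.
    # `current_partitions` is the zero-based index of the zone being filled.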
num_slaves_this_zone = total // num_partitions
if (total % num_partitions) - current_partitions > 0:
num_slaves_this_zone += 1
return num_slaves_this_zone
# Gets the IP address, taking into account the --private-ips flag
def get_ip_address(instance, private_ips=False):
ip = instance.ip_address if not private_ips else \
instance.private_ip_address
return ip
# Gets the DNS name, taking into account the --private-ips flag
def get_dns_name(instance, private_ips=False):
dns = instance.public_dns_name if not private_ips else \
instance.private_ip_address
if not dns:
raise UsageError("Failed to determine hostname of {0}.\n"
"Please check that you provided --private-ips if "
"necessary".format(instance))
return dns
def real_main():
(opts, action, cluster_name) = parse_args()
# Input parameter validation
get_validate_spark_version(opts.spark_version, opts.spark_git_repo)
if opts.wait is not None:
# NOTE: DeprecationWarnings are silent in 2.7+ by default.
# To show them, run Python with the -Wdefault switch.
# See: https://docs.python.org/3.5/whatsnew/2.7.html
warnings.warn(
"This option is deprecated and has no effect. "
"spark-ec2 automatically waits as long as necessary for clusters to start up.",
DeprecationWarning
)
if opts.identity_file is not None:
if not os.path.exists(opts.identity_file):
print("ERROR: The identity file '{f}' doesn't exist.".format(f=opts.identity_file),
file=stderr)
sys.exit(1)
file_mode = os.stat(opts.identity_file).st_mode
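        # The key must be readable by its owner and have no group/other permissions (e.g. chmod 400).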
if not (file_mode & S_IRUSR) or not oct(file_mode)[-2:] == '00':
print("ERROR: The identity file must be accessible only by you.", file=stderr)
print('You can fix this with: chmod 400 "{f}"'.format(f=opts.identity_file),
file=stderr)
sys.exit(1)
if opts.instance_type not in EC2_INSTANCE_TYPES:
print("Warning: Unrecognized EC2 instance type for instance-type: {t}".format(
t=opts.instance_type), file=stderr)
if opts.master_instance_type != "":
if opts.master_instance_type not in EC2_INSTANCE_TYPES:
print("Warning: Unrecognized EC2 instance type for master-instance-type: {t}".format(
t=opts.master_instance_type), file=stderr)
# Since we try instance types even if we can't resolve them, we check if they resolve first
# and, if they do, see if they resolve to the same virtualization type.
if opts.instance_type in EC2_INSTANCE_TYPES and \
opts.master_instance_type in EC2_INSTANCE_TYPES:
if EC2_INSTANCE_TYPES[opts.instance_type] != \
EC2_INSTANCE_TYPES[opts.master_instance_type]:
print("Error: spark-ec2 currently does not support having a master and slaves "
"with different AMI virtualization types.", file=stderr)
print("master instance virtualization type: {t}".format(
t=EC2_INSTANCE_TYPES[opts.master_instance_type]), file=stderr)
print("slave instance virtualization type: {t}".format(
t=EC2_INSTANCE_TYPES[opts.instance_type]), file=stderr)
sys.exit(1)
if opts.ebs_vol_num > 8:
print("ebs-vol-num cannot be greater than 8", file=stderr)
sys.exit(1)
# Prevent breaking ami_prefix (/, .git and startswith checks)
# Prevent forks with non spark-ec2 names for now.
if opts.spark_ec2_git_repo.endswith("/") or \
opts.spark_ec2_git_repo.endswith(".git") or \
not opts.spark_ec2_git_repo.startswith("https://github.com") or \
not opts.spark_ec2_git_repo.endswith("spark-ec2"):
print("spark-ec2-git-repo must be a github repo and it must not have a trailing / or .git. "
"Furthermore, we currently only support forks named spark-ec2.", file=stderr)
sys.exit(1)
if not (opts.deploy_root_dir is None or
(os.path.isabs(opts.deploy_root_dir) and
os.path.isdir(opts.deploy_root_dir) and
os.path.exists(opts.deploy_root_dir))):
print("--deploy-root-dir must be an absolute path to a directory that exists "
"on the local file system", file=stderr)
sys.exit(1)
try:
if opts.profile is None:
conn = ec2.connect_to_region(opts.region)
else:
conn = ec2.connect_to_region(opts.region, profile_name=opts.profile)
except Exception as e:
print((e), file=stderr)
sys.exit(1)
# Select an AZ at random if it was not specified.
if opts.zone == "":
opts.zone = random.choice(conn.get_all_zones()).name
if action == "launch":
if opts.slaves <= 0:
print("ERROR: You have to start at least 1 slave", file=sys.stderr)
sys.exit(1)
if opts.resume:
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
else:
(master_nodes, slave_nodes) = launch_cluster(conn, opts, cluster_name)
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
setup_cluster(conn, master_nodes, slave_nodes, opts, True)
elif action == "destroy":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
if any(master_nodes + slave_nodes):
print("The following instances will be terminated:")
for inst in master_nodes + slave_nodes:
print("> %s" % get_dns_name(inst, opts.private_ips))
print("ALL DATA ON ALL NODES WILL BE LOST!!")
msg = "Are you sure you want to destroy the cluster {c}? (y/N) ".format(c=cluster_name)
response = raw_input(msg)
if response == "y":
print("Terminating master...")
for inst in master_nodes:
inst.terminate()
print("Terminating slaves...")
for inst in slave_nodes:
inst.terminate()
# Delete security groups as well
if opts.delete_groups:
group_names = [cluster_name + "-master", cluster_name + "-slaves"]
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='terminated'
)
print("Deleting security groups (this will take some time)...")
attempt = 1
while attempt <= 3:
print("Attempt %d" % attempt)
groups = [g for g in conn.get_all_security_groups() if g.name in group_names]
success = True
# Delete individual rules in all groups before deleting groups to
# remove dependencies between them
for group in groups:
print("Deleting rules in security group " + group.name)
for rule in group.rules:
for grant in rule.grants:
success &= group.revoke(ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group=grant)
# Sleep for AWS eventual-consistency to catch up, and for instances
# to terminate
time.sleep(30) # Yes, it does have to be this long :-(
for group in groups:
try:
# It is needed to use group_id to make it work with VPC
conn.delete_security_group(group_id=group.id)
print("Deleted security group %s" % group.name)
except boto.exception.EC2ResponseError:
success = False
print("Failed to delete security group %s" % group.name)
# Unfortunately, group.revoke() returns True even if a rule was not
# deleted, so this needs to be rerun if something fails
if success:
break
attempt += 1
if not success:
print("Failed to delete all security groups after 3 tries.")
print("Try re-running in a few minutes.")
elif action == "login":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
if not master_nodes[0].public_dns_name and not opts.private_ips:
print("Master has no public DNS name. Maybe you meant to specify --private-ips?")
else:
master = get_dns_name(master_nodes[0], opts.private_ips)
print("Logging into master " + master + "...")
proxy_opt = []
if opts.proxy_port is not None:
proxy_opt = ['-D', opts.proxy_port]
subprocess.check_call(
ssh_command(opts) + proxy_opt + ['-t', '-t', "%s@%s" % (opts.user, master)])
elif action == "reboot-slaves":
response = raw_input(
"Are you sure you want to reboot the cluster " +
cluster_name + " slaves?\n" +
"Reboot cluster slaves " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print("Rebooting slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
print("Rebooting " + inst.id)
inst.reboot()
elif action == "get-master":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
if not master_nodes[0].public_dns_name and not opts.private_ips:
print("Master has no public DNS name. Maybe you meant to specify --private-ips?")
else:
print(get_dns_name(master_nodes[0], opts.private_ips))
elif action == "stop":
response = raw_input(
"Are you sure you want to stop the cluster " +
cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
"BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
"AMAZON EBS IF IT IS EBS-BACKED!!\n" +
"All data on spot-instance slaves will be lost.\n" +
"Stop cluster " + cluster_name + " (y/N): ")
if response == "y":
(master_nodes, slave_nodes) = get_existing_cluster(
conn, opts, cluster_name, die_on_error=False)
print("Stopping master...")
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.stop()
print("Stopping slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
if inst.spot_instance_request_id:
inst.terminate()
else:
inst.stop()
elif action == "start":
(master_nodes, slave_nodes) = get_existing_cluster(conn, opts, cluster_name)
print("Starting slaves...")
for inst in slave_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
print("Starting master...")
for inst in master_nodes:
if inst.state not in ["shutting-down", "terminated"]:
inst.start()
wait_for_cluster_state(
conn=conn,
opts=opts,
cluster_instances=(master_nodes + slave_nodes),
cluster_state='ssh-ready'
)
# Determine types of running instances
existing_master_type = master_nodes[0].instance_type
existing_slave_type = slave_nodes[0].instance_type
# Setting opts.master_instance_type to the empty string indicates we
# have the same instance type for the master and the slaves
if existing_master_type == existing_slave_type:
existing_master_type = ""
opts.master_instance_type = existing_master_type
opts.instance_type = existing_slave_type
setup_cluster(conn, master_nodes, slave_nodes, opts, False)
else:
print("Invalid action: %s" % action, file=stderr)
sys.exit(1)
def main():
try:
real_main()
except UsageError as e:
print("\nError:\n", e, file=stderr)
sys.exit(1)
if __name__ == "__main__":
logging.basicConfig()
main()
| apache-2.0 | -5,231,567,560,959,082,000 | 39.289542 | 100 | 0.578217 | false |
jwparker1797/USAR_Add_In | Install/Data/GP_tools/HQIIS_report.py | 1 | 9879 | ##############################################################
##HQIIS Updater Version 2.0
##
##*Changes for v2.0
## -Upgraded tool to work with SDSFIE 3.1 Army Adaptation
##
##This script takes data from an HQIIS excel file and runs a report against a USAR SDSFIE 3.1 geodatabase.
##
##This script only works on GIS records with the correct "rpuid".
##
##
##Created 27 July 2015.
##
## Created and tested on:
## Windows 7 (64 bit) operating system
## ESRI ArcGIS 10.2
## Python 2.7.3
##
##Author:
##Jesse Parker
##USAR ARIMD
##650.526.9418
##############################################################
def HQIIS_Report(params):
import arcpy, sys, os
from helper_functions.get_data_file import get_data_file
database = params[0].valueAsText
report_location = params[1].valueAsText
report_name = params[2].valueAsText
siteuid = params[3].value
open_report = params[4].value
# ##Split excel path
# path = os.path.split(HQIIS_excel_raw)
# HQIIS_excel = path[1]
# HQIIS_excel_location = path[0]
#
# ##Set the workspace to the folder containing the HQIIS excel file
# arcpy.env.workspace = HQIIS_excel_location
# arcpy.env.overwriteOutput = True
#
# try:
# ##Convert the HQIIS excel to .dbf
# arcpy.AddMessage("Converting excel...")
# HQIIS = arcpy.ExcelToTable_conversion(HQIIS_excel,"HQIIS_table.dbf")
# arcpy.AddMessage("Finished Converting...")
# except Exception as e:
# arcpy.AddMessage("Failed to convert HQIIS excel file.")
# sys.exit(arcpy.AddMessage(e.message))
HQIIS = get_data_file("Data.gdb\\HQIIS")
#HQIIS = os.path.split(__file__)[0] + r"\data\HQIIS.dbf"
work_folder = os.path.split(database)[0]
try:
##Create HQIIS report
arcpy.AddMessage("Creating report file...")
arcpy.env.workspace = report_location
arcpy.env.overwriteOutput = True
HQIIS_report = arcpy.TableToTable_conversion(HQIIS, work_folder, report_name + ".dbf")
fields = [f.name for f in arcpy.ListFields(HQIIS_report)]
arcpy.DeleteField_management(HQIIS_report, [fields[1],fields[2],fields[3],fields[4],fields[6],fields[10],fields[11],fields[13],fields[14],fields[15],fields[16],fields[17],fields[18],fields[19],fields[20],fields[21],fields[22],fields[23],fields[24],fields[25],fields[26],fields[29],fields[30],fields[33],fields[34],fields[35],fields[36],fields[37],fields[38],fields[39],fields[40],fields[41]])
arcpy.AddField_management(HQIIS_report, "GIS_QTY","FLOAT")
arcpy.AddField_management(HQIIS_report, "GIS_UOM","TEXT")
if siteuid != 0:
site_uid_delim = arcpy.AddFieldDelimiters (HQIIS_report, "SITE_UID")
arcpy.TableSelect_analysis(HQIIS_report,report_name + "_" + "site" + ".dbf", site_uid_delim + " = " + str(siteuid))
            arcpy.Delete_management(HQIIS_report)
HQIIS_report = report_location + os.sep + report_name + "_" + "site" + ".dbf"
arcpy.env.workspace = database
arcpy.env.overwriteOutput = True
##Generate Report
arcpy.AddMessage("Generating report...")
datasetlist = arcpy.ListDatasets("*", "Feature")
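        # For every feature class, summarize area (polygons), length (polylines) or
        # feature count (points) by real property unique ID (rpuid) and accumulate
        # the totals into the GIS_QTY / GIS_UOM columns of the report table.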
for dataset in datasetlist:
FC_list = arcpy.ListFeatureClasses("*","",dataset)
for FC in FC_list:
try:
#Skip centerline featureclasses for report
fc_list_wo_current = [fc for fc in FC_list if fc != FC]
if "Centerline" in FC or (FC[:-2] in [s[:-2] for s in fc_list_wo_current] and FC[-1] == 'L'):
continue
desc = arcpy.Describe(FC)
shape_type = desc.shapeType
fields = [f.name for f in arcpy.ListFields(FC)]
##Check for feature class shape type
if shape_type == "Polygon":
##Summarize the stats of the feature class
arcpy.Statistics_analysis (FC, work_folder +"\sum_stat.dbf", [["featureArea","SUM"], ["featureAreaUOM","FIRST"]], "rpuid")
with arcpy.da.SearchCursor(work_folder +"\sum_stat.dbf",["rpuid","SUM_featur","FIRST_feat"]) as cursor:
for row in cursor:
##Cursor through the summary to collect values
if row[0] != "":
try:
rpuid = int(row[0])
except:
rpuid = row[0]
rpuid = str(rpuid)
qty = row[1]
uom = row[2]
if uom in ["YD2","SYD","squareYard"]:
uom = "SY"
if uom in ["FT2","SFT","squareFoot",]:
uom = "SF"
##update report with collected values from the summary
with arcpy.da.UpdateCursor(HQIIS_report,["RPA_UID","GIS_QTY","GIS_UOM"]) as cursor:
for row in cursor:
if str(row[0]) == rpuid:
row[1] += qty
row[2] += uom
cursor.updateRow(row)
elif shape_type == "Polyline":
arcpy.Statistics_analysis (FC, work_folder +"\sum_stat.dbf", [["featureLength","SUM"], ["featureLengthUOM","FIRST"]], "rpuid")
with arcpy.da.SearchCursor(work_folder +"\sum_stat.dbf",["rpuid","SUM_featur","FIRST_feat"]) as cursor:
for row in cursor:
##Cursor through the summary to collect values
if row[0] != "":
try:
rpuid = int(row[0])
except:
rpuid = row[0]
rpuid = str(rpuid)
qty = row[1]
uom = row[2]
if uom in ["FT","foot"]:
uom = "LF"
if uom in ["YD","yard"]:
uom = "YD"
##update report with collected values from the summary
with arcpy.da.UpdateCursor(HQIIS_report,["RPA_UID","GIS_QTY","GIS_UOM"]) as cursor:
for row in cursor:
if str(row[0]) == rpuid:
row[1] += qty
row[2] += uom
cursor.updateRow(row)
elif shape_type == "Point":
arcpy.Statistics_analysis(FC, work_folder + "\sum_stat.dbf", [["rpuid","COUNT"]], "rpuid")
with arcpy.da.SearchCursor(work_folder +"\sum_stat.dbf",["rpuid","COUNT_rpui"]) as cursor:
for row in cursor:
##Cursor through the summary to collect values
if row[0] != "":
try:
rpuid = int(row[0])
except:
rpuid = row[0]
rpuid = str(rpuid)
qty = row[1]
##update report with collected values from the summary
with arcpy.da.UpdateCursor(HQIIS_report,["RPA_UID","GIS_QTY","GIS_UOM"]) as cursor:
for row in cursor:
if str(row[0]) == rpuid:
row[1] += qty
row[2] += "EA"
cursor.updateRow(row)
except Exception as e:
arcpy.AddMessage("Error in feature class " + str(FC))
arcpy.AddMessage(e.message)
arcpy.AddMessage("Feature class skipped, continuing...")
continue
##Delete summary statistic table
arcpy.Delete_management (work_folder +"\sum_stat.dbf")
arcpy.env.workspace = report_location
arcpy.env.overwriteOutput = True
##Checks for a report and converts it to excel and opens if user wanted it to open on completion
if arcpy.Exists(HQIIS_report) == True:
arcpy.TableToExcel_conversion (HQIIS_report, report_name + ".xls")
arcpy.Delete_management(HQIIS_report)
if open_report == True:
os.startfile(report_location + os.sep + report_name + ".xls")
arcpy.AddMessage("Report Complete")
except Exception as e:
arcpy.AddMessage("Report unable to run...Error: ")
arcpy.AddMessage(e.message)
arcpy.AddMessage("Line: ")
arcpy.AddMessage(sys.exc_info()[2].tb_lineno)
##finally:
## if arcpy.Exists(HQIIS_excel_location +"\sum_stat.dbf") == True:
## arcpy.Delete_management (HQIIS_excel_location +"\sum_stat.dbf")
## if arcpy.Exists(HQIIS_report) == True:
## arcpy.Delete_management(HQIIS_report)
## arcpy.Delete_management(HQIIS)
| gpl-3.0 | 8,414,839,463,250,512,000 | 53.28022 | 400 | 0.469582 | false |
metacollin/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/cq_make_capacitors_export_fc.py | 1 | 8497 | # -*- coding: utf8 -*-
#!/usr/bin/python
#
# This is derived from a cadquery script for generating PDIP models in X3D format
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
# Dimensions are from Microchip's Packaging Specification document:
# DS00000049BY. Body drawing is the same as the QFP generator.
## requirements
## cadquery FreeCAD plugin
## https://github.com/jmwright/cadquery-freecad-module
## to run the script just do: freecad make_gwexport_fc.py modelName
## e.g. c:\freecad\bin\freecad make_gw_export_fc.py SOIC_8
## the script will generate STEP and VRML parametric models
## to be used with kicad StepUp script
#* These are a FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* *
#* cadquery script for generating QFP/SOIC/SSOP/TSSOP models in STEP AP214 *
#* Copyright (c) 2015 *
#* Maurice https://launchpad.net/~easyw *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
__title__ = "make chip capacitors 3D models"
__author__ = "maurice"
__Comment__ = 'make chip capacitors 3D models exported to STEP and VRML for Kicad StepUP script'
___ver___ = "1.3.1 14/08/2015"
# maui import cadquery as cq
# maui from Helpers import show
from math import tan, radians, sqrt
from collections import namedtuple
import sys, os
# maui start
import FreeCAD, Draft, FreeCADGui
import ImportGui
if FreeCAD.GuiUp:
from PySide import QtCore, QtGui
#checking requirements
#######################################################################
FreeCAD.Console.PrintMessage("FC Version \r\n")
FreeCAD.Console.PrintMessage(FreeCAD.Version())
FC_majorV=FreeCAD.Version()[0];FC_minorV=FreeCAD.Version()[1]
FreeCAD.Console.PrintMessage('FC Version '+FC_majorV+FC_minorV+'\r\n')
if int(FC_majorV) <= 0:
if int(FC_minorV) < 15:
reply = QtGui.QMessageBox.information(None,"Warning! ...","use FreeCAD version >= "+FC_majorV+"."+FC_minorV+"\r\n")
# FreeCAD.Console.PrintMessage(all_params_soic)
FreeCAD.Console.PrintMessage(FreeCAD.ConfigGet("AppHomePath")+'Mod/')
file_path_cq=FreeCAD.ConfigGet("AppHomePath")+'Mod/CadQuery'
if os.path.exists(file_path_cq):
FreeCAD.Console.PrintMessage('CadQuery exists\r\n')
else:
msg="missing CadQuery Module!\r\n\r\n"
msg+="https://github.com/jmwright/cadquery-freecad-module/wiki"
reply = QtGui.QMessageBox.information(None,"Info ...",msg)
#######################################################################
from Gui.Command import *
outdir=os.path.dirname(os.path.realpath(__file__))
sys.path.append(outdir)
# Import cad_tools
import cq_cad_tools
# Reload tools
reload(cq_cad_tools)
# Explicitly load all needed functions
from cq_cad_tools import FuseObjs_wColors, GetListOfObjects, restore_Main_Tools, \
exportSTEP, close_CQ_Example, exportVRML, saveFCdoc, z_RotateObject
# Gui.SendMsgToActiveView("Run")
Gui.activateWorkbench("CadQueryWorkbench")
import FreeCADGui as Gui
close_CQ_Example(App, Gui)
# from export_x3d import exportX3D, Mesh
import cadquery as cq
from Helpers import show
# maui end
import cq_params_chip_cap # modules parameters
from cq_params_chip_cap import *
def make_chip(params):
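    # Build a simple two-terminal chip model: a filleted dielectric body plus two
    # end-cap terminations (pin bands); returns the (case, pins) solids separately
    # so they can be shown with different colours.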
# dimensions for chip capacitors
L = params.L # package length
W = params.W # package width
T = params.T # package height
pb = params.pb # pin band
pt = params.pt # pin thickness
ef = params.ef # fillet of edges
modelName = params.modelName # Model Name
rotation = params.rotation # rotation
# Create a 3D box based on the dimension variables above and fillet it
case = cq.Workplane("XY").box(L-2*pb, W-2*pt, T-2*pt)
case.edges("|X").fillet(ef)
# body.edges("|Z").fillet(ef)
#translate the object
case=case.translate((0,0,T/2)).rotate((0,0,0), (0,0,1), 0)
## # extract pins from the case
## case = case.cut(pins)
# Create a 3D box based on the dimension variables above and fillet it
pin1 = cq.Workplane("XY").box(pb, W, T)
pin1.edges("|X").fillet(ef)
pin1=pin1.translate((-L/2+pb/2,0,T/2)).rotate((0,0,0), (0,0,1), 0)
pin2 = cq.Workplane("XY").box(pb, W, T)
pin2.edges("|X").fillet(ef)
pin2=pin2.translate((L/2-pb/2,0,T/2)).rotate((0,0,0), (0,0,1), 0)
pins = pin1.union(pin2)
#body_copy.ShapeColor=result.ShapeColor
return (case, pins)
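# Illustrative usage (assumes a parameter set defined in cq_params_chip_cap):
#   case, pins = make_chip(all_params['1206_h106'])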
# The dimensions of the box. These can be modified rather than changing the
# object's code directly.
# when run from freecad-cadquery
if __name__ == "temp.module":
ModelName=""
# when run from command line
if __name__ == "__main__":
FreeCAD.Console.PrintMessage('\r\nRunning...\r\n')
if len(sys.argv) < 3:
FreeCAD.Console.PrintMessage('No variant name is given! building c_1206_h106')
model_to_build='1206_h106'
else:
model_to_build=sys.argv[2]
if model_to_build == "all":
variants = all_params.keys()
FreeCAD.Console.PrintMessage(variants)
FreeCAD.Console.PrintMessage('\r\n')
else:
variants = [model_to_build]
for variant in variants:
FreeCAD.Console.PrintMessage('\r\n'+variant)
        if variant not in all_params:
            print("Parameters for %s don't exist in 'all_params', skipping." % variant)
continue
ModelName = all_params[variant].modelName
Newdoc = FreeCAD.newDocument(ModelName)
App.setActiveDocument(ModelName)
Gui.ActiveDocument=Gui.getDocument(ModelName)
case, pins = make_chip(all_params[variant])
color_attr=case_color+(0,)
show(case, color_attr)
#FreeCAD.Console.PrintMessage(pins_color)
color_attr=pins_color+(0,)
#FreeCAD.Console.PrintMessage(color_attr)
show(pins, color_attr)
doc = FreeCAD.ActiveDocument
objs=GetListOfObjects(FreeCAD, doc)
FuseObjs_wColors(FreeCAD, FreeCADGui,
doc.Name, objs[0].Name, objs[1].Name)
doc.Label=ModelName
objs=GetListOfObjects(FreeCAD, doc)
objs[0].Label=ModelName
restore_Main_Tools()
#rotate if required
if (all_params[variant].rotation!=0):
rot= all_params[variant].rotation
z_RotateObject(doc, rot)
script_dir=os.path.dirname(os.path.realpath(__file__))
out_dir=script_dir+destination_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
#out_dir="./generated_qfp/"
# export STEP model
exportSTEP(doc,ModelName,out_dir)
# scale and export Vrml model
scale=0.3937001
exportVRML(doc,ModelName,scale,out_dir)
# Save the doc in Native FC format
saveFCdoc(App, Gui, doc, ModelName,out_dir)
#display BBox
FreeCADGui.ActiveDocument.getObject("Part__Feature").BoundingBox = True
## run()
| gpl-2.0 | 4,737,682,461,076,555,000 | 36.431718 | 123 | 0.60033 | false |
ujdhesa/unisubs | utils/forms/__init__.py | 1 | 4941 | import re
from django import forms
from django.core import validators
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
from utils.forms.recapcha import ReCaptchaField
from utils.validators import UniSubURLValidator
assert ReCaptchaField # Shut up, Pyflakes.
class AjaxForm(object):
def get_errors(self):
output = {}
for key, value in self.errors.items():
            output[key] = '\n'.join([force_unicode(i) for i in value])
return output
class StripRegexField(forms.RegexField):
def to_python(self, value):
value = super(StripRegexField, self).to_python(value)
return value.strip()
class StripURLField(forms.URLField):
def to_python(self, value):
value = super(StripURLField, self).to_python(value)
return value.strip()
class FeedURLValidator(validators.URLValidator):
regex = re.compile(
r'^(?:(?:https?)|(?:feed))://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
class FeedURLField(forms.URLField):
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=validators.URL_VALIDATOR_USER_AGENT, *args, **kwargs):
forms.CharField.__init__(self,max_length, min_length, *args,
**kwargs)
self.validators.append(FeedURLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
def to_python(self, value):
value = super(FeedURLField, self).to_python(value)
return value.strip()
class UniSubURLField(StripURLField):
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=validators.URL_VALIDATOR_USER_AGENT, *args, **kwargs):
super(forms.URLField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(UniSubURLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
class ListField(forms.RegexField):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
super(ListField, self).__init__(self.pattern, max_length, min_length, *args, **kwargs)
def clean(self, value):
if value:
value = value and value.endswith(',') and value or value+','
value = value.replace(' ', '')
value = super(ListField, self).clean(value)
return [item for item in value.strip(',').split(',') if item]
email_list_re = re.compile(
r"""^(([-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*")@(?:[A-Z0-9]+(?:-*[A-Z0-9]+)*\.)+[A-Z]{2,6},)+$""", re.IGNORECASE)
class EmailListField(ListField):
default_error_messages = {
'invalid': _(u'Enter valid e-mail addresses separated by commas.')
}
pattern = email_list_re
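# Illustrative behavior (hypothetical addresses):
#   EmailListField().clean(u"[email protected], [email protected]")  ->  [u'[email protected]', u'[email protected]']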
username_list_re = re.compile(r'^([A-Z0-9]+,)+$', re.IGNORECASE)
class UsernameListField(ListField):
default_error_messages = {
'invalid': _(u'Enter valid usernames separated by commas. Username can contain only a-z, A-Z and 0-9.')
}
pattern = username_list_re
class ErrorableModelForm(forms.ModelForm):
"""This class simply adds a single method to the standard one: add_error.
When performing validation in a clean() method you may want to add an error
message to a single field, instead of to non_field_errors. There's a lot of
silly stuff you need to do to make that happen, so add_error() takes care of
it for you.
"""
def add_error(self, message, field_name=None, cleaned_data=None):
"""Add the given error message to the given field.
If no field is given, a standard forms.ValidationError will be raised.
If a field is given, the cleaned_data dictionary must also be given to
keep Django happy.
If a field is given an exception will NOT be raised, so it's up to you
to stop processing if appropriate.
"""
if not field_name:
raise forms.ValidationError(message)
if field_name not in self._errors:
self._errors[field_name] = self.error_class()
self._errors[field_name].append(message)
try:
del cleaned_data[field_name]
except KeyError:
pass
def flatten_errorlists(errorlists):
'''Return a list of the errors (just the text) in any field.'''
errors = []
for field, errorlist in errorlists.items():
label = '' if field == '__all__' else ('%s: ' % field)
errors += ['%s%s' % (label, error) for error in errorlist]
return errors
| agpl-3.0 | -4,802,996,464,406,440,000 | 37.007692 | 216 | 0.619713 | false |
DXCanas/kolibri | kolibri/core/logger/serializers.py | 1 | 6061 | from django.db.models import Sum
from django.utils.timezone import now
from le_utils.constants import exercises
from rest_framework import serializers
from kolibri.core.auth.models import FacilityUser
from kolibri.core.logger.constants.exercise_attempts import MAPPING
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import ExamAttemptLog
from kolibri.core.logger.models import ExamLog
from kolibri.core.logger.models import MasteryLog
from kolibri.core.logger.models import UserSessionLog
from kolibri.core.serializers import KolibriModelSerializer
class ContentSessionLogSerializer(KolibriModelSerializer):
extra_fields = serializers.JSONField(default='{}')
class Meta:
model = ContentSessionLog
fields = ('id', 'user', 'content_id', 'channel_id', 'start_timestamp',
'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')
class ExamLogSerializer(KolibriModelSerializer):
progress = serializers.SerializerMethodField()
score = serializers.SerializerMethodField()
def get_progress(self, obj):
return obj.attemptlogs.values_list('item').distinct().count()
def get_score(self, obj):
return obj.attemptlogs.values_list('item').order_by('completion_timestamp').distinct().aggregate(Sum('correct')).get('correct__sum')
class Meta:
model = ExamLog
fields = ('id', 'exam', 'user', 'closed', 'progress', 'score', 'completion_timestamp')
read_only_fields = ('completion_timestamp', )
def update(self, instance, validated_data):
# This has changed, set the completion timestamp
if validated_data.get('closed') and not instance.closed:
instance.completion_timestamp = now()
return super(ExamLogSerializer, self).update(instance, validated_data)
class MasteryLogSerializer(KolibriModelSerializer):
pastattempts = serializers.SerializerMethodField()
totalattempts = serializers.SerializerMethodField()
mastery_criterion = serializers.JSONField(default='{}')
update_fields = ('pastattempts', )
class Meta:
model = MasteryLog
fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'user',
'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')
def get_pastattempts(self, obj):
mastery_criterion = obj.mastery_criterion
exercise_type = mastery_criterion.get('type')
attemptlogs = AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog) \
.values('correct', 'hinted', 'error') \
.order_by('-start_timestamp')
# get the first x logs depending on the exercise type
if exercise_type == exercises.M_OF_N:
return attemptlogs[:mastery_criterion['n']]
elif MAPPING.get(exercise_type):
return attemptlogs[:MAPPING.get(exercise_type)]
else:
return attemptlogs[:10]
def get_totalattempts(self, obj):
return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()
class AttemptLogSerializer(KolibriModelSerializer):
answer = serializers.JSONField(default='{}')
interaction_history = serializers.JSONField(default='[]')
class Meta:
model = AttemptLog
fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',
'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',
'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history', 'error')
class ExamAttemptLogSerializer(KolibriModelSerializer):
answer = serializers.JSONField(default='{}', allow_null=True)
interaction_history = serializers.JSONField(default='[]')
class Meta:
model = ExamAttemptLog
fields = ('id', 'examlog', 'start_timestamp', 'channel_id', 'content_id',
'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',
'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')
def validate(self, data):
# Only do this validation when both are being set
# not necessary on PATCH, for example
if data.get('examlog') and data.get('user'):
try:
if data['examlog'].user != data['user']:
raise serializers.ValidationError('User field and user for related exam log are not the same')
except ExamLog.DoesNotExist:
raise serializers.ValidationError('Invalid exam log')
return data
class ContentSummaryLogSerializer(KolibriModelSerializer):
currentmasterylog = serializers.SerializerMethodField()
extra_fields = serializers.JSONField(default='{}')
update_fields = ()
class Meta:
model = ContentSummaryLog
fields = ('id', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',
'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')
def get_currentmasterylog(self, obj):
try:
current_log = obj.masterylogs.latest('end_timestamp')
return MasteryLogSerializer(current_log).data
except MasteryLog.DoesNotExist:
return None
class UserSessionLogSerializer(KolibriModelSerializer):
update_fields = ()
class Meta:
model = UserSessionLog
fields = ('id', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')
class TotalContentProgressSerializer(serializers.ModelSerializer):
progress = serializers.SerializerMethodField()
class Meta:
model = FacilityUser
fields = ('progress', 'id')
def get_progress(self, obj):
return obj.contentsummarylog_set.filter(progress=1).aggregate(Sum('progress')).get('progress__sum')
| mit | 3,028,567,171,412,789,000 | 39.139073 | 140 | 0.670516 | false |
AdamWill/anaconda | pyanaconda/storage_utils.py | 1 | 25447 | #
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""UI-independent storage utility functions"""
import re
import locale
import os
from contextlib import contextmanager
from blivet import arch
from blivet import util
from blivet import udev
from blivet.size import Size
from blivet.errors import StorageError
from blivet.platform import platform as _platform
from blivet.devicefactory import DEVICE_TYPE_LVM
from blivet.devicefactory import DEVICE_TYPE_LVM_THINP
from blivet.devicefactory import DEVICE_TYPE_BTRFS
from blivet.devicefactory import DEVICE_TYPE_MD
from blivet.devicefactory import DEVICE_TYPE_PARTITION
from blivet.devicefactory import DEVICE_TYPE_DISK
from pyanaconda.i18n import _, N_
from pyanaconda import isys
from pyanaconda.constants import productName
from pyanaconda.errors import errorHandler, ERROR_RAISE
from pykickstart.constants import AUTOPART_TYPE_PLAIN, AUTOPART_TYPE_BTRFS
from pykickstart.constants import AUTOPART_TYPE_LVM, AUTOPART_TYPE_LVM_THINP
import logging
log = logging.getLogger("anaconda")
# TODO: all those constants and mappings should go to blivet
DEVICE_TEXT_LVM = N_("LVM")
DEVICE_TEXT_LVM_THINP = N_("LVM Thin Provisioning")
DEVICE_TEXT_MD = N_("RAID")
DEVICE_TEXT_PARTITION = N_("Standard Partition")
DEVICE_TEXT_BTRFS = N_("Btrfs")
DEVICE_TEXT_DISK = N_("Disk")
DEVICE_TEXT_MAP = {DEVICE_TYPE_LVM: DEVICE_TEXT_LVM,
DEVICE_TYPE_MD: DEVICE_TEXT_MD,
DEVICE_TYPE_PARTITION: DEVICE_TEXT_PARTITION,
DEVICE_TYPE_BTRFS: DEVICE_TEXT_BTRFS,
DEVICE_TYPE_LVM_THINP: DEVICE_TEXT_LVM_THINP,
DEVICE_TYPE_DISK: DEVICE_TEXT_DISK}
PARTITION_ONLY_FORMAT_TYPES = ("macefi", "prepboot", "biosboot", "appleboot")
MOUNTPOINT_DESCRIPTIONS = {"Swap": N_("The 'swap' area on your computer is used by the operating\n"
"system when running low on memory."),
"Boot": N_("The 'boot' area on your computer is where files needed\n"
"to start the operating system are stored."),
"Root": N_("The 'root' area on your computer is where core system\n"
"files and applications are stored."),
"Home": N_("The 'home' area on your computer is where all your personal\n"
"data is stored."),
"BIOS Boot": N_("The BIOS boot partition is required to enable booting\n"
"from GPT-partitioned disks on BIOS hardware."),
"PReP Boot": N_("The PReP boot partition is required as part of the\n"
"boot loader configuration on some PPC platforms.")
}
AUTOPART_CHOICES = ((N_("Standard Partition"), AUTOPART_TYPE_PLAIN),
(N_("Btrfs"), AUTOPART_TYPE_BTRFS),
(N_("LVM"), AUTOPART_TYPE_LVM),
(N_("LVM Thin Provisioning"), AUTOPART_TYPE_LVM_THINP))
AUTOPART_DEVICE_TYPES = {AUTOPART_TYPE_LVM: DEVICE_TYPE_LVM,
AUTOPART_TYPE_LVM_THINP: DEVICE_TYPE_LVM_THINP,
AUTOPART_TYPE_PLAIN: DEVICE_TYPE_PARTITION,
AUTOPART_TYPE_BTRFS: DEVICE_TYPE_BTRFS}
NAMED_DEVICE_TYPES = (DEVICE_TYPE_BTRFS, DEVICE_TYPE_LVM, DEVICE_TYPE_MD, DEVICE_TYPE_LVM_THINP)
CONTAINER_DEVICE_TYPES = (DEVICE_TYPE_LVM, DEVICE_TYPE_BTRFS, DEVICE_TYPE_LVM_THINP)
udev_device_dict_cache = None
def size_from_input(input_str, units=None):
""" Get a Size object from an input string.
:param str input_str: a string forming some representation of a size
:param units: use these units if none specified in input_str
:type units: str or NoneType
:returns: a Size object corresponding to input_str
:rtype: :class:`blivet.size.Size` or NoneType
Units default to bytes if no units in input_str or units.
"""
if not input_str:
# Nothing to parse
return None
# A string ending with a digit contains no units information.
if re.search(r'[\d.%s]$' % locale.nl_langinfo(locale.RADIXCHAR), input_str):
input_str += units or ""
try:
size = Size(input_str)
except ValueError:
return None
return size
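# Illustrative examples (values assume blivet's Size parsing):
#   size_from_input("10", units="GiB")  ->  Size("10 GiB")
#   size_from_input("512 MiB")          ->  Size("512 MiB")
#   size_from_input("")                 ->  None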
def device_type_from_autopart(autopart_type):
"""Get device type matching the given autopart type."""
return AUTOPART_DEVICE_TYPES.get(autopart_type, None)
class UIStorageFilter(logging.Filter):
"""Logging filter for UI storage events"""
def filter(self, record):
record.name = "storage.ui"
return True
@contextmanager
def ui_storage_logger():
"""Context manager that applies the UIStorageFilter for its block"""
storage_log = logging.getLogger("blivet")
storage_filter = UIStorageFilter()
storage_log.addFilter(storage_filter)
yield
storage_log.removeFilter(storage_filter)
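# Illustrative usage (the body is a placeholder for any code that logs via blivet):
#   with ui_storage_logger():
#       ...blivet log records emitted here are tagged with the name "storage.ui"...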
class SanityException(Exception):
pass
class SanityError(SanityException):
pass
class SanityWarning(SanityException):
pass
class LUKSDeviceWithoutKeyError(SanityError):
pass
def sanity_check(storage, min_ram=isys.MIN_RAM):
"""
Run a series of tests to verify the storage configuration.
This function is called at the end of partitioning so that
we can make sure you don't have anything silly (like no /,
a really small /, etc).
:param storage: an instance of the :class:`blivet.Blivet` class to check
:param min_ram: minimum RAM (in MiB) needed for the installation with swap
space available
:rtype: a list of SanityExceptions
:return: a list of accumulated errors and warnings
"""
exns = []
checkSizes = [('/usr', Size("250 MiB")), ('/tmp', Size("50 MiB")), ('/var', Size("384 MiB")),
('/home', Size("100 MiB")), ('/boot', Size("200 MiB"))]
mustbeonlinuxfs = ['/', '/var', '/tmp', '/usr', '/home', '/usr/share', '/usr/lib']
mustbeonroot = ['/bin', '/dev', '/sbin', '/etc', '/lib', '/root', '/mnt', 'lost+found', '/proc']
filesystems = storage.mountpoints
root = storage.fsset.root_device
swaps = storage.fsset.swap_devices
if root:
if root.size < Size("250 MiB"):
exns.append(
SanityWarning(_("Your root partition is less than 250 "
"megabytes which is usually too small to "
"install %s.") % (productName,)))
else:
exns.append(
SanityError(_("You have not defined a root partition (/), "
"which is required for installation of %s "
"to continue.") % (productName,)))
# Prevent users from installing on s390x with (a) no /boot volume, (b) the
# root volume on LVM, and (c) the root volume not restricted to a single
# PV
# NOTE: There is not really a way for users to create a / volume
# restricted to a single PV. The backend support is there, but there are
# no UI hook-ups to drive that functionality, but I do not personally
# care. --dcantrell
if arch.is_s390() and '/boot' not in storage.mountpoints and root:
if root.type == 'lvmlv' and not root.single_pv:
exns.append(
SanityError(_("This platform requires /boot on a dedicated "
"partition or logical volume. If you do not "
"want a /boot volume, you must place / on a "
"dedicated non-LVM partition.")))
# FIXME: put a check here for enough space on the filesystems. maybe?
for (mount, size) in checkSizes:
if mount in filesystems and filesystems[mount].size < size:
exns.append(
SanityWarning(_("Your %(mount)s partition is less than "
"%(size)s which is lower than recommended "
"for a normal %(productName)s install.")
% {'mount': mount, 'size': size,
'productName': productName}))
# storage.mountpoints is a property that returns a new dict each time, so
# iterating over it is thread-safe.
for (mount, device) in filesystems.items():
problem = filesystems[mount].check_size()
if problem < 0:
exns.append(
SanityError(_("Your %(mount)s partition is too small for %(format)s formatting "
"(allowable size is %(minSize)s to %(maxSize)s)")
% {"mount": mount, "format": device.format.name,
"minSize": device.min_size, "maxSize": device.max_size}))
elif problem > 0:
exns.append(
SanityError(_("Your %(mount)s partition is too large for %(format)s formatting "
"(allowable size is %(minSize)s to %(maxSize)s)")
% {"mount":mount, "format": device.format.name,
"minSize": device.min_size, "maxSize": device.max_size}))
if storage.bootloader and not storage.bootloader.skip_bootloader:
stage1 = storage.bootloader.stage1_device
if not stage1:
exns.append(
SanityError(_("No valid boot loader target device found. "
"See below for details.")))
pe = _platform.stage1_missing_error
if pe:
exns.append(SanityError(_(pe)))
else:
storage.bootloader.is_valid_stage1_device(stage1)
exns.extend(SanityError(msg) for msg in storage.bootloader.errors)
exns.extend(SanityWarning(msg) for msg in storage.bootloader.warnings)
stage2 = storage.bootloader.stage2_device
if stage1 and not stage2:
exns.append(SanityError(_("You have not created a bootable partition.")))
else:
storage.bootloader.is_valid_stage2_device(stage2)
exns.extend(SanityError(msg) for msg in storage.bootloader.errors)
exns.extend(SanityWarning(msg) for msg in storage.bootloader.warnings)
if not storage.bootloader.check():
exns.extend(SanityError(msg) for msg in storage.bootloader.errors)
#
# check that GPT boot disk on BIOS system has a BIOS boot partition
#
if _platform.weight(fstype="biosboot") and \
stage1 and stage1.is_disk and \
getattr(stage1.format, "labelType", None) == "gpt":
missing = True
for part in [p for p in storage.partitions if p.disk == stage1]:
if part.format.type == "biosboot":
missing = False
break
if missing:
exns.append(
SanityError(_("Your BIOS-based system needs a special "
"partition to boot from a GPT disk label. "
"To continue, please create a 1MiB "
"'biosboot' type partition.")))
if not swaps:
installed = util.total_memory()
required = Size("%s MiB" % (min_ram + isys.NO_SWAP_EXTRA_RAM))
if installed < required:
exns.append(
SanityError(_("You have not specified a swap partition. "
"%(requiredMem)s of memory is required to continue installation "
"without a swap partition, but you only have %(installedMem)s.")
% {"requiredMem": required,
"installedMem": installed}))
else:
exns.append(
SanityWarning(_("You have not specified a swap partition. "
"Although not strictly required in all cases, "
"it will significantly improve performance "
"for most installations.")))
no_uuid = [s for s in swaps if s.format.exists and not s.format.uuid]
if no_uuid:
exns.append(
SanityWarning(_("At least one of your swap devices does not have "
"a UUID, which is common in swap space created "
"using older versions of mkswap. These devices "
"will be referred to by device path in "
"/etc/fstab, which is not ideal since device "
"paths can change under a variety of "
"circumstances. ")))
for (mountpoint, dev) in filesystems.items():
if mountpoint in mustbeonroot:
exns.append(
SanityError(_("This mount point is invalid. The %s directory must "
"be on the / file system.") % mountpoint))
if mountpoint in mustbeonlinuxfs and (not dev.format.mountable or not dev.format.linux_native):
exns.append(
SanityError(_("The mount point %s must be on a linux file system.") % mountpoint))
if storage.root_device and storage.root_device.format.exists:
e = storage.must_format(storage.root_device)
if e:
exns.append(SanityError(e))
exns += verify_LUKS_devices_have_key(storage)
exns += check_mounted_partitions(storage)
return exns
def verify_LUKS_devices_have_key(storage):
"""
    Verify that all non-existent LUKS devices have some way of obtaining
a key.
Note: LUKS device creation will fail without a key.
:rtype: generator of str
:returns: a generator of error messages, may yield no error messages
"""
for dev in (d for d in storage.devices if \
d.format.type == "luks" and \
not d.format.exists and \
not d.format.has_key):
yield LUKSDeviceWithoutKeyError(_("Encryption requested for LUKS device %s but no encryption key specified for this device.") % (dev.name,))
def check_mounted_partitions(storage):
""" Check the selected disks to make sure all their partitions are unmounted.
:rtype: generator of str
:returns: a generator of error messages, may yield no error messages
"""
for disk in storage.disks:
if not disk.partitioned:
continue
for part in disk.format.partitions:
part_dev = storage.devicetree.get_device_by_path(part.path)
if part_dev and part_dev.protected:
log.debug("Not checking protected %s for being mounted, assuming live image mount", part.path)
continue
if part.busy:
yield SanityError(_("%s is currently mounted and cannot be used for the "
"installation. Please unmount it and retry.") % part.path)
def bound_size(size, device, old_size):
""" Returns a size bounded by the maximum and minimum size for
the device.
:param size: the candidate size
:type size: :class:`blivet.size.Size`
:param device: the device being displayed
:type device: :class:`blivet.devices.StorageDevice`
:param old_size: the fallback size
:type old_size: :class:`blivet.size.Size`
:returns: a size to which to set the device
:rtype: :class:`blivet.size.Size`
If size is 0, interpreted as set size to maximum possible.
If no maximum size is available, reset size to old_size, but
log a warning.
"""
max_size = device.max_size
min_size = device.min_size
if not size:
if max_size:
log.info("No size specified, using maximum size for this device (%d).", max_size)
size = max_size
else:
log.warning("No size specified and no maximum size available, setting size back to original size (%d).", old_size)
size = old_size
else:
if max_size:
if size > max_size:
log.warning("Size specified (%d) is greater than the maximum size for this device (%d), using maximum size.", size, max_size)
size = max_size
else:
log.warning("Unknown upper bound on size. Using requested size (%d).", size)
if size < min_size:
log.warning("Size specified (%d) is less than the minimum size for this device (%d), using minimum size.", size, min_size)
size = min_size
return size
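# Illustrative behavior (assumes a device with min_size=1 GiB and max_size=10 GiB):
#   bound_size(Size("20 GiB"), device, old_size)   ->  Size("10 GiB")  (clamped to the maximum)
#   bound_size(Size(0), device, old_size)          ->  Size("10 GiB")  (0 means "use the maximum")
#   bound_size(Size("500 MiB"), device, old_size)  ->  Size("1 GiB")   (raised to the minimum)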
def try_populate_devicetree(devicetree):
"""
Try to populate the given devicetree while catching errors and dealing with
some special ones in a nice way (giving user chance to do something about
them).
:param devicetree: devicetree to try to populate
    :type devicetree: :class:`blivet.devicetree.DeviceTree`
"""
while True:
try:
devicetree.populate()
except StorageError as e:
if errorHandler.cb(e) == ERROR_RAISE:
raise
else:
continue
else:
break
return
class StorageSnapshot(object):
"""R/W snapshot of storage (i.e. a :class:`blivet.Blivet` instance)"""
def __init__(self, storage=None):
"""
Create new instance of the class
:param storage: if given, its snapshot is created
:type storage: :class:`blivet.Blivet`
"""
if storage:
self._storage_snap = storage.copy()
else:
self._storage_snap = None
@property
def storage(self):
return self._storage_snap
@property
def created(self):
return bool(self._storage_snap)
def create_snapshot(self, storage):
"""Create (and save) snapshot of storage"""
self._storage_snap = storage.copy()
def dispose_snapshot(self):
"""
Dispose (unref) the snapshot
.. note::
In order to free the memory taken by the snapshot, all references
returned by :property:`self.storage` have to be unrefed too.
"""
self._storage_snap = None
def reset_to_snapshot(self, storage, dispose=False):
"""
Reset storage to snapshot (**modifies :param:`storage` in place**)
:param storage: :class:`blivet.Blivet` instance to reset to the created snapshot
:param bool dispose: whether to dispose the snapshot after reset or not
:raises ValueError: if no snapshot is available (was not created before)
"""
if not self.created:
raise ValueError("No snapshot created, cannot reset")
# we need to create a new copy from the snapshot first -- simple
# assignment from the snapshot would result in snapshot being modified
# by further changes of 'storage'
new_copy = self._storage_snap.copy()
storage.devicetree = new_copy.devicetree
storage.roots = new_copy.roots
storage.fsset = new_copy.fsset
if dispose:
self.dispose_snapshot()
# a snapshot of early storage as we got it from scanning disks without doing any
# changes
on_disk_storage = StorageSnapshot()
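# Illustrative usage (storage is a blivet.Blivet instance):
#   on_disk_storage.create_snapshot(storage)    # remember the freshly scanned state
#   ...changes get scheduled against storage...
#   on_disk_storage.reset_to_snapshot(storage)  # discard those changes again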
def filter_unsupported_disklabel_devices(devices):
""" Return input list minus any devices that exist on an unsupported disklabel. """
return [d for d in devices
if not any(not getattr(p, "disklabel_supported", True) for p in d.ancestors)]
def device_name_is_disk(device_name, devicetree=None, refresh_udev_cache=False):
"""Report if the given device name corresponds to a disk device.
Check if the device name is a disk device or not. This function uses
the provided Blivet devicetree for the checking and Blivet udev module
if no devicetree is provided.
Please note that the udev based check uses an internal cache that is generated
when this function is first called in the udev checking mode. This basically
means that udev devices added later will not be taken into account.
    If this is a problem for your use case then use the refresh_udev_cache option
to force a refresh of the udev cache.
:param str device_name: name of the device to check
:param devicetree: device tree to look up devices in (optional)
:type devicetree: :class:`blivet.DeviceTree`
:param bool refresh_udev_cache: governs if the udev device cache should be refreshed
:returns: True if the device name corresponds to a disk, False if not
:rtype: bool
"""
if devicetree is None:
global udev_device_dict_cache
if device_name:
if udev_device_dict_cache is None or refresh_udev_cache:
                # Lazily load the udev dict that contains the {device_name: udev_device, ...}
# mappings. The operation could be quite costly due to udev_settle() calls,
# so we cache it in this non-elegant way.
# An unfortunate side effect of this is that udev devices that show up after
# this function is called for the first time will not be taken into account.
udev_device_dict_cache = {udev.device_get_name(d): d for d in udev.get_devices()}
udev_device = udev_device_dict_cache.get(device_name)
return udev_device and udev.device_is_realdisk(udev_device)
else:
return False
else:
device = devicetree.get_device_by_name(device_name)
return device and device.is_disk
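# Illustrative usage (results depend on the machine's hardware):
#   device_name_is_disk("sda")   ->  True when /dev/sda is a real disk
#   device_name_is_disk("sda1")  ->  False, a partition is not a disk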
def device_matches(spec, devicetree=None, disks_only=False):
"""Return names of block devices matching the provided specification.
:param str spec: a device identifier (name, UUID=<uuid>, &c)
:keyword devicetree: device tree to look up devices in (optional)
:type devicetree: :class:`blivet.DeviceTree`
:param bool disks_only: if only disk devices matching the spec should be returned
:returns: names of matching devices
:rtype: list of str
The spec can contain multiple "sub specs" delimited by a |, for example:
"sd*|hd*|vd*"
In such case we resolve the specs from left to right and return all
unique matches, for example:
["sda", "sda1", "sda2", "sdb", "sdb1", "vdb"]
If disks_only is specified we only return
disk devices matching the spec. For the example above
the output with disks_only=True would be:
["sda", "sdb", "vdb"]
Also note that parse methods will not have access to a devicetree, while execute
methods will. The devicetree is superior in that it can resolve md
array names and in that it reflects scheduled device removals, but for
normal local disks udev.resolve_devspec should suffice.
"""
matches = []
# the device specifications might contain multiple "sub specs" separated by a |
# - the specs are processed from left to right
for single_spec in spec.split("|"):
full_spec = single_spec
if not full_spec.startswith("/dev/"):
full_spec = os.path.normpath("/dev/" + full_spec)
# the regular case
single_spec_matches = udev.resolve_glob(full_spec)
for match in single_spec_matches:
if match not in matches:
# skip non-disk devices in disk-only mode
if disks_only and not device_name_is_disk(match):
continue
matches.append(match)
dev_name = None
# Use spec here instead of full_spec to preserve the spec and let the
# called code decide whether to treat the spec as a path instead of a name.
if devicetree is None:
# we run the spec through resolve_devspec() here as unlike resolve_glob()
# it can also resolve labels and UUIDs
dev_name = udev.resolve_devspec(single_spec)
if disks_only and dev_name:
if not device_name_is_disk(dev_name):
dev_name = None # not a disk
else:
# devicetree can also handle labels and UUIDs
device = devicetree.resolve_device(single_spec)
if device:
dev_name = device.name
if disks_only and not device_name_is_disk(dev_name, devicetree=devicetree):
dev_name = None # not a disk
        # The dev_name variable can be None if the spec is not found or is not valid,
# but we don't want that ending up in the list.
if dev_name and dev_name not in matches:
matches.append(dev_name)
return matches
| gpl-2.0 | -1,749,860,290,309,111,600 | 40.377236 | 148 | 0.614807 | false |
balihoo-gens/blambda | blambda/utils/findfunc.py | 1 | 2897 | import os
import glob
import json
import re
from subprocess import check_output, CalledProcessError
from blambda.utils.base import pGreen, pRed, pBlue, pYellow, spawn
import time
from pprint import pprint
try:
import boto3
except ImportError:
print("Unable to import boto")
def all_json_files(root):
return [os.path.join(r, f) for r, _, fs in os.walk(root) for f in fs if f.endswith('.json')]
def split_path(path):
(basedir, jsonfile) = os.path.split(path)
(name, ext) = os.path.splitext(jsonfile)
return basedir, name, ext
def find_manifest(pkgname, srcdir="."):
return find_manifests([pkgname]).get(pkgname)
def find_manifests(pkgnames, verbose=True):
""" return a dictionary keyed by pkgname with the found manifest's full path """
(abspath, dirname) = (os.path.abspath, os.path.dirname)
(ret,stdout,stderr) = spawn("git rev-parse --show-toplevel")
root = stdout[0] if ret == 0 else os.getcwd()
jsonfiles = all_json_files(root)
def ensure_json(pkgname):
return pkgname if pkgname.endswith(".json") else "{}.json".format(pkgname)
def match(pkg, jsonfile):
return jsonfile.endswith(ensure_json(pkg)) and is_manifest(jsonfile, verbose)
return {p:j for p in pkgnames for j in jsonfiles if match(p,j)}
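# Illustrative result (paths are hypothetical and depend on the repository layout):
#   find_manifests(["pkg_a", "pkg_b"])  ->  {"pkg_a": "/repo/pkg_a.json", "pkg_b": "/repo/lambdas/pkg_b.json"}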
def get_runtime(fname):
    # minimal completion (assumption): return the runtime declared in the package manifest, if any
    manifest_file = find_manifest(fname)
    return json.load(open(manifest_file)).get('runtime') if manifest_file else None
def is_manifest(path, verbose=True, raise_on_bad_json=False):
try:
#hacky exclusions of files over 10k
if os.path.getsize(path) < 10000:
with open(path) as f:
manifest = None
try:
manifest = json.load(f)
except ValueError as e:
msg = "{} is not valid json: {}".format(path, e)
if raise_on_bad_json:
raise Exception(msg)
elif verbose:
print(pRed(msg))
return type(manifest) == dict and manifest.get('blambda') == "manifest"
except OSError as e:
if verbose:
print(pRed("unhandled exception processing {}".format(path)))
return False
def all_manifests(srcdir, verbose=0):
""" find all paths containing a package file """
paths = all_json_files(srcdir)
manifests = []
for path in paths:
if is_manifest(path, verbose=verbose, raise_on_bad_json=True):
manifests.append(split_path(path)[1])
return sorted(manifests)
def all_remote_functions(region="us-east-1"):
lmb = boto3.client('lambda', region_name=region)
functions = {}
def getfs(marker=None):
lf = lmb.list_functions
response = lf(Marker=marker) if marker else lf()
functions.update({ f['FunctionName']: f['Description'] for f in response['Functions'] })
if 'NextMarker' in response:
getfs(response['NextMarker'])
getfs()
return functions
| mit | 7,985,033,497,449,660,000 | 34.329268 | 96 | 0.626855 | false |
CelineBoudier/rapid-router | game/views/level.py | 1 | 12457 | # -*- coding: utf-8 -*-
# Code for Life
#
# Copyright (C) 2016, Ocado Innovation Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS – Section 7 GNU General Public Licence
#
# This licence does not grant any right, title or interest in any “Ocado” logos,
# trade names or the trademark “Ocado” or any other trademarks or domain names
# owned by Ocado Innovation Limited or the Ocado group of companies or any other
# distinctive brand features of “Ocado” as may be secured from time to time. You
# must not distribute any modification of this program using the trademark
# “Ocado” or claim any affiliation or association with Ocado or its employees.
#
# You are not authorised to use the name Ocado (or any of its trade names) or
# the names of any author or contributor in advertising or for publicity purposes
# pertaining to the distribution of this program, without the prior written
# authorisation of Ocado.
#
# Any propagation, distribution or conveyance of this program must include this
# copyright notice and these terms. You must not misrepresent the origins of this
# program; modified versions of the program must be marked as such and not
# identified as the original program.
from __future__ import division
import json
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.template import RequestContext
from django.utils import timezone
from django.utils.safestring import mark_safe
import game.level_management as level_management
import game.messages as messages
import game.permissions as permissions
from game import app_settings
from game.cache import cached_default_level, cached_episode, \
cached_custom_level, cached_level_decor, cached_level_blocks
from game.models import Level, Attempt, Workspace
from helper import renderError
from game.decor import get_decor_element
def play_custom_level_from_editor(request, levelId):
return play_custom_level(request, levelId, from_editor=True)
def play_custom_level(request, levelId, from_editor=False):
level = cached_custom_level(levelId)
if level.default:
raise Http404
return play_level(request, level, from_editor)
def play_default_level(request, levelName):
level = cached_default_level(levelName)
return play_level(request, level)
def _next_level_url(level, night_mode):
if not level.next_level:
return ''
return _level_url(level.next_level, night_mode)
def add_night(url, night_mode):
if night_mode:
return url + "?night=1"
return url
def _level_url(level, night_mode):
if level.default:
result = _default_level_url(level)
else:
result = _custom_level_url(level)
return add_night(result, night_mode)
def _default_level_url(level):
return reverse('play_default_level', args=[level.name])
def _custom_level_url(level):
return reverse('play_custom_level', args=[level.id])
def play_level(request, level, from_editor=False):
""" Loads a level for rendering in the game.
**Context**
``RequestContext``
``level``
Level that is about to be played. An instance of :model:`game.Level`.
``blocks``
Blocks that are available during the game. List of :model:`game.Block`.
``lesson``
Instruction shown at the load of the level. String from `game.messages`.
``hint``
Hint shown after a number of failed attempts. String from `game.messages`.
**Template:**
:template:`game/game.html`
"""
night_mode = False if not app_settings.NIGHT_MODE_FEATURE_ENABLED else 'night' in request.GET
if not permissions.can_play_level(request.user, level, app_settings.EARLY_ACCESS_FUNCTION(request)):
return renderError(request, messages.noPermissionTitle(), messages.notSharedLevel())
# Set default level description/hint lookups
lesson = 'description_level_default'
hint = 'hint_level_default'
# If it's one of our levels, set level description/hint lookups
# to point to what they should be
if level.default:
lesson = 'description_level' + str(level.name)
hint = 'hint_level' + str(level.name)
# Try to get the relevant message, and fall back on defaults
try:
lessonCall = getattr(messages, lesson)
hintCall = getattr(messages, hint)
except AttributeError:
lessonCall = messages.description_level_default
hintCall = messages.hint_level_default
lesson = mark_safe(lessonCall())
hint = mark_safe(hintCall())
house = get_decor_element('house', level.theme).url
cfc = get_decor_element('cfc', level.theme).url
background = get_decor_element('tile1', level.theme).url
character = level.character
workspace = None
python_workspace = None
if not request.user.is_anonymous() and hasattr(request.user.userprofile, 'student'):
student = request.user.userprofile.student
attempt = Attempt.objects \
.filter(level=level, student=student, finish_time__isnull=True, night_mode=night_mode) \
.order_by('-start_time') \
.first()
if not attempt:
attempt = Attempt(level=level, student=student, score=None, night_mode=night_mode)
fetch_workspace_from_last_attempt(attempt)
attempt.save()
else:
attempt = close_and_reset(attempt)
workspace = attempt.workspace
python_workspace = attempt.python_workspace
decor_data = cached_level_decor(level)
character_url = character.top_down
character_width = character.width
character_height = character.height
wreckage_url = 'van_wreckage.svg'
if night_mode:
block_data = level_management.get_night_blocks(level)
night_mode_javascript = "true"
lesson = messages.title_night_mode()
model_solution = '[]'
else:
block_data = cached_level_blocks(level)
night_mode_javascript = "false"
model_solution = level.model_solution
return_view = 'level_editor' if from_editor else 'levels'
context = RequestContext(request, {
'level': level,
'lesson': lesson,
'blocks': block_data,
'decor': decor_data,
'character': character,
'background': background,
'house': house,
'cfc': cfc,
'hint': hint,
'workspace': workspace,
'python_workspace': python_workspace,
'return_url': reverse(return_view),
'character_url': character_url,
'character_width': character_width,
'character_height': character_height,
'wreckage_url': wreckage_url,
'night_mode': night_mode_javascript,
'night_mode_feature_enabled': str(app_settings.NIGHT_MODE_FEATURE_ENABLED).lower(),
'model_solution': model_solution,
'next_level_url': _next_level_url(level, night_mode),
'flip_night_mode_url': _level_url(level, not night_mode),
})
return render(request, 'game/game.html', context_instance=context)
def fetch_workspace_from_last_attempt(attempt):
latest_attempt = Attempt.objects \
.filter(level=attempt.level, student=attempt.student, night_mode=attempt.night_mode) \
.order_by('-start_time') \
.first()
if latest_attempt:
attempt.workspace = latest_attempt.workspace
attempt.python_workspace = latest_attempt.python_workspace
def delete_level(request, levelID):
success = False
level = Level.objects.get(id=levelID)
if permissions.can_delete_level(request.user, level):
level_management.delete_level(level)
success = True
return HttpResponse(json.dumps({'success': success}), content_type='application/javascript')
def submit_attempt(request):
""" Processes a request on submission of the program solving the current level. """
if (not request.user.is_anonymous() and request.method == 'POST' and
hasattr(request.user.userprofile, "student")):
level = get_object_or_404(Level, id=request.POST.get('level', 1))
student = request.user.userprofile.student
attempt = Attempt.objects.filter(level=level, student=student, finish_time__isnull=True).first()
if attempt:
attempt.score = float(request.POST.get('score'))
            attempt.workspace = request.POST.get('workspace')
attempt.python_workspace = request.POST.get('python_workspace')
record_best_attempt(attempt)
close_and_reset(attempt)
return HttpResponse('[]', content_type='application/json')
def record_best_attempt(attempt):
best_attempt = Attempt.objects \
.filter(level=attempt.level, student=attempt.student, night_mode=attempt.night_mode, is_best_attempt=True) \
.first()
if best_attempt and (best_attempt.score <= attempt.score):
best_attempt.is_best_attempt = False
best_attempt.save()
attempt.is_best_attempt = True
elif not best_attempt:
attempt.is_best_attempt = True
def close_and_reset(attempt):
attempt.finish_time = timezone.now()
attempt.save()
new_attempt = Attempt(level=attempt.level,
student=attempt.student,
score=None,
night_mode=attempt.night_mode,
workspace=attempt.workspace,
python_workspace=attempt.python_workspace)
new_attempt.save()
return new_attempt
def load_list_of_workspaces(request):
workspaces_owned = []
if permissions.can_create_workspace(request.user):
workspaces_owned = Workspace.objects.filter(owner=request.user)
workspaces = [{'id': workspace.id, 'name': workspace.name, 'blockly_enabled': workspace.blockly_enabled, 'python_enabled': workspace.python_enabled}
for workspace in workspaces_owned]
return HttpResponse(json.dumps(workspaces), content_type='application/json')
def load_workspace(request, workspaceID):
workspace = Workspace.objects.get(id=workspaceID)
if permissions.can_load_workspace(request.user, workspace):
return HttpResponse(json.dumps({'contents': workspace.contents,
'python_contents': workspace.python_contents}),
content_type='application/json')
return HttpResponse(json.dumps(''), content_type='application/json')
def save_workspace(request, workspaceID=None):
name = request.POST.get('name')
contents = request.POST.get('contents')
python_contents = request.POST.get('python_contents')
blockly_enabled = json.loads(request.POST.get('blockly_enabled'))
python_enabled = json.loads(request.POST.get('python_enabled'))
workspace = None
if workspaceID:
workspace = Workspace.objects.get(id=workspaceID)
elif permissions.can_create_workspace(request.user):
workspace = Workspace(owner=request.user)
if workspace and permissions.can_save_workspace(request.user, workspace):
workspace.name = name
workspace.contents = contents
workspace.python_contents = python_contents
workspace.blockly_enabled = blockly_enabled
workspace.python_enabled = python_enabled
workspace.save()
return load_list_of_workspaces(request)
def start_episode(request, episodeId):
episode = cached_episode(episodeId)
return play_level(request, episode.first_level, False)
def delete_workspace(request, workspaceID):
workspace = Workspace.objects.get(id=workspaceID)
if permissions.can_delete_workspace(request.user, workspace):
workspace.delete()
return load_list_of_workspaces(request)
| agpl-3.0 | 8,623,084,045,860,947,000 | 35.801775 | 152 | 0.685184 | false |
yadt/yadt-config-rpm-maker | test/unittests/unittest_support.py | 1 | 2118 | # coding=utf-8
#
# yadt-config-rpm-maker
# Copyright (C) 2011-2013 Immobilien Scout GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from StringIO import StringIO
from unittest import TestCase
class UnitTests(TestCase):
def assert_mock_never_called(self, mock_object):
mock_call_count = mock_object.call_count
error_message = "mock object should not be called,\n but has been called %d times:\n" % mock_call_count
for call in mock_object.call_args_list:
error_message += ' %s' % str(call)
self.assertEqual(0, mock_call_count, error_message)
def assert_is_instance_of(self, test_object, the_class):
error_message = 'The given object "{test_object}" is not a instance of {class_name}'.format(test_object=str(test_object),
class_name=the_class.__name__)
self.assertTrue(isinstance(test_object, the_class), error_message)
def create_fake_file(self, content=""):
"""
        Creates a fake file-like object. Use this if you have to mock away
@patch('__builtin__.open')
"""
class FakeFile(StringIO):
def __init__(self, content):
StringIO.__init__(self, content)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
return FakeFile(content)
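    # Illustrative usage (assumes the mock library's patch decorator):
    #   @patch('__builtin__.open')
    #   def test_reads_file(self, mock_open):
    #       mock_open.return_value = self.create_fake_file("some file content")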
| gpl-3.0 | -8,868,756,480,230,580,000 | 35.517241 | 130 | 0.617092 | false |
doctormo/django-cms-rosetta | cmsrosetta/tests/test_htmlutil.py | 1 | 2428 | #
# Copyright (C) 2015 Martin Owens <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test the html parsing
"""
import unittest
import sys
sys.path.insert(0, '../')
try:
from test import test_support
except ImportError:
from test import support as test_support
from htmlutil import PotParser
class Html2PoTestCase(unittest.TestCase):
"""Test our custom html to po code."""
def setUp(self):
self.parser = PotParser()
def _t(self, body, result):
self.assertEqual(self.parser.parse(body), result)
# Non-splitting
test_01_plaintext = lambda self: self._t("No HTML in Text", {'': 'No HTML in Text'})
test_02_taginclude = lambda self: self._t("Some <b>HTML</b> Text", {'': 'Some <b>HTML</b> Text'})
test_03_tagattr = lambda self: self._t("A <b id=\"pk\">to</b> me", {'': "A <b id=\"pk\">to</b> me"})
test_04_tagclean = lambda self: self._t("A <b id='pk' >to</b > me", {'': "A <b id=\"pk\">to</b> me"})
test_05_escapes = lambda self: self._t("This has &le html", {'': "This has &le html"})
# Splitting tests
test_20_paragraphs = lambda self: self._t("<p>One</p><p>Two</p>", {'p-1': 'One', 'p-2': 'Two'})
test_21_divs = lambda self: self._t("<div>One</div><div>Two</div>", {'div-1': 'One', 'div-2': 'Two'})
test_22_levels = lambda self: self._t("<div>One<p>Two</p>Three</div>", {'div-1': 'One{{ div-p-1 }}Three', 'div-p-1': 'Two'})
test_23_anchor = lambda self: self._t("<p>Something <a href=''>Linked</a> here</p>", {'p-a-1': u'Linked', 'p-1': u'Something {{ p-a-1 }} here'})
test_24_image = lambda self: self._t("<p>An <img src='foo'/> goes here</p>", {'p-1': u'An {{ p-img-1 }} goes here'})
if __name__ == '__main__':
test_support.run_unittest( Html2PoTestCase )
| agpl-3.0 | 7,637,989,137,399,331,000 | 41.596491 | 152 | 0.628913 | false |
zamaudio/calf-LR4 | knobs/knob1.py | 1 | 4510 | #!/usr/bin/env python
import cairo
from math import pi, cos, sin
WIDTH, HEIGHT = 20, 20
background = "knob1_bg.png"
output = "knob1.png"
x, y = WIDTH / 2, HEIGHT / 2
lwidth = WIDTH / 10
radius = WIDTH / 2 - lwidth
radiusplus = radius + lwidth / 2
radiusminus = radius - lwidth / 2
radiusminus2 = radius - lwidth
radiusminus3 = radius - lwidth * 3 / 2
radiusint = (radius - lwidth / 2) * 0.25
value = 0.7
arrow = WIDTH / 10
phases = 65
# Setup Cairo
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, phases * WIDTH, HEIGHT * 4)
ctx = cairo.Context(surface)
ctx.set_source_rgba(0.75, 0.75, 0.75, 0)
ctx.rectangle(0, 0, phases * WIDTH, 4 * HEIGHT)
ctx.fill()
for variant in range(0, 4):
x = WIDTH / 2
y = HEIGHT * (variant + 0.5)
for phase in range(0, phases):
# Draw background image
bgimage = cairo.ImageSurface.create_from_png(background)
ctx.set_source_surface(bgimage, x - WIDTH / 2, y - HEIGHT / 2);
ctx.rectangle(phase * WIDTH, variant * HEIGHT, WIDTH, HEIGHT)
ctx.fill ();
        # Compute the normalized value for this phase and the dial's angular range
value = phase * 1.0 / (phases - 1)
if variant != 3:
sangle = (180 - 45)*pi/180
eangle = (360 + 45)*pi/180
nleds = 31
else:
sangle = (270)*pi/180
eangle = (270 + 360)*pi/180
nleds = 32
vangle = sangle + value * (eangle - sangle)
c, s = cos(vangle), sin(vangle)
midled = (nleds - 1) / 2
midphase = (phases - 1) / 2
thresholdP = midled + 1 + ((phase - midphase - 1) * 1.0 * (nleds - midled - 2) / (phases - midphase - 2))
thresholdN = midled - 1 - ((midphase - 1 - phase) * 1.0 * (nleds - midled - 2) / (midphase - 1))
spacing = pi / nleds
for led in range(0, nleds):
if variant == 3:
adelta = (eangle - sangle) / (nleds)
else:
adelta = (eangle - sangle - spacing) / (nleds - 1)
lit = False
glowlit = False
glowval = 0.5
hilite = False
lvalue = led * 1.0 / (nleds - 1)
pvalue = phase * 1.0 / (phases - 1)
if variant == 3:
# XXXKF works only for phases = 2 * leds
exled = phase / 2.0
lit = led == exled or (phase == phases - 1 and led == 0)
glowlit = led == (exled + 0.5) or led == (exled - 0.5)
glowval = 0.8
hilite = (phase % ((phases - 1) / 4)) == 0
if variant == 0: lit = (pvalue == 1.0) or pvalue > lvalue
if variant == 1:
if led == midled:
lit = (phase == midphase)
#glowlit = (phase < midphase and thresholdN >= midled - 1) or (phase > midphase and thresholdP <= midled + 1)
glowlit = False
hilite = True
elif led > midled and phase > midphase:
# led = [midled + 1, nleds - 1]
# phase = [midphase + 1, phases - 1]
lit = led <= thresholdP
glowlit = led <= thresholdP + 1
glowval = 0.4
elif led < midled and phase < midphase:
lit = led >= thresholdN
glowlit = led >= thresholdN - 1
glowval = 0.4
else:
lit = False
if variant == 2: lit = pvalue == 0 or pvalue < lvalue
if not lit:
if not glowlit:
ctx.set_source_rgb(0.0, 0.1, 0.1)
else:
ctx.set_source_rgb(0.0 * glowval, 0.75 * glowval, 1.0 * glowval)
else:
if hilite:
ctx.set_source_rgb(0.3, 1.0, 1.0)
else:
ctx.set_source_rgb(0.0, 0.75, 1.0)
ctx.set_line_width(2)
if hilite:
ctx.set_line_width(3)
ctx.arc(x, y, radius, sangle + adelta * led, sangle + adelta * led + spacing)
ctx.stroke()
ctx.set_source_rgba(0, 0, 0, 1)
ctx.set_line_width(1.5)
mtx = ctx.get_matrix()
ctx.translate(x + radiusminus2 * c, y + radiusminus2 * s)
ctx.rotate(vangle)
ctx.move_to(0, 0)
ctx.line_to(-radius/5, 0)
ctx.stroke()
ctx.set_matrix(mtx)
x += WIDTH
# Output a PNG file
surface.write_to_png(output)
| lgpl-2.1 | 1,829,769,222,033,994,200 | 35.666667 | 129 | 0.484479 | false |
google/grr | grr/core/grr_response_core/lib/util/context.py | 1 | 1983 | #!/usr/bin/env python
"""A module with utilities for dealing with context managers."""
from typing import ContextManager
from typing import Generic
from typing import Sequence
from typing import TypeVar
_T = TypeVar("_T")
class NullContext(ContextManager[_T], Generic[_T]):
"""A context manager that always yields provided values.
This class is useful for providing context-like semantics for values that are
not context managers themselves because they do not need to manage any
resources but are used as context managers.
This is a backport of the `contextlib.nullcontext` class introduced in Python
3.7. Once support for old versions of Python is dropped, all uses of this
class should be replaced with the one provided by the standard library.
"""
def __init__(self, value: _T) -> None:
self._value = value
def __enter__(self) -> _T:
return self._value
def __exit__(self, exc_type, exc_value, traceback):
del exc_type, exc_value, traceback # Unused.
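# Illustrative usage:
#   with NullContext(42) as value:
#     assert value == 42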
class MultiContext(ContextManager[Sequence[_T]], Generic[_T]):
"""A context managers that sequences multiple context managers.
This is similar to the monadic `sequence` operator: it takes a list of context
managers, enters each of them and yields list of values that the managers
yield.
One possible scenario where this class comes in handy is when one needs to
open multiple files.
"""
# TODO: `Collection` would be a better type here, but it is only
# available in Python 3.6+. Once support for Python 2 is dropped, this can be
# generalized.
def __init__(self, managers: Sequence[ContextManager[_T]]) -> None:
self._managers = managers
def __enter__(self) -> Sequence[_T]:
values = []
for manager in self._managers:
value = manager.__enter__()
values.append(value)
return values
def __exit__(self, exc_type, exc_value, traceback):
for manager in self._managers:
manager.__exit__(exc_type, exc_value, traceback)
| apache-2.0 | 707,296,354,064,719,400 | 32.05 | 80 | 0.709027 | false |
srujant/MLNews | searchFunction.py | 1 | 2473 | import requests
from ml import svm
import json
import NLProcessor as nlp
import lxml.html
from requests import get
from goose import Goose
def getSuggestions(query):
url = 'https://api.cognitive.microsoft.com/bing/v5.0/suggestions/?q=' + query
headers = {'Ocp-Apim-Subscription-Key':'854e8088bb8347418e6f934b996487af'}
r = requests.get(url, headers = headers)
results = []
suggestions = r.json()['suggestionGroups']
max = 3
for suggestion in suggestions:
s = suggestion['searchSuggestions']
for term in s:
if max == 0:
break
max-=1
results.append(str(term['query'].encode("ascii", "ignore")))
return results
def manualSearch(query):
url = 'https://api.cognitive.microsoft.com/bing/v5.0/news/search?q=' + query
# query string parameters
payload = {'q': query, 'freshness':'Week'}
# custom headers
headers = {'Ocp-Apim-Subscription-Key': '22207001cbdc4c2487ad91d1cec1bdf2'}
r = requests.get(url, params=payload, headers=headers)
links = []
descriptions = []
print(r.json())
try:
listOfArticles = r.json()['value']
except:
return []
max = 5
for article in listOfArticles:
if('clusteredArticles' in article):
information = article['clusteredArticles']
else:
information = article
thisList = []
if max == 0:
break
max-=1
if(type(information) is dict):
links.append(information['url'])
descriptions.append(str(information['description'].encode("ascii", "ignore")))
fin = []
rating = 0.0
i = 0
for link in links:
thisDict = {}
rating = svm.compute(link)
thisDict['id'] = str(i+1)
thisDict['description'] = descriptions[i]
thisDict['url'] = link
thisDict['score'] = str(rating)
fin.append(thisDict)
i = i + 1
return json.dumps(fin)
def processURL(url):
toReturn = {}
score = svm.compute(url)
t = lxml.html.parse(url)
title = t.find(".//title").text
response = get(url)
extractor = Goose()
article = extractor.extract(raw_html=response.content)
file = article.cleaned_text
keywords = nlp.generateEntity(file)
toReturn['title'] = title
toReturn['score'] = score
toReturn['keywords'] = keywords
toReturn['url'] = url
return json.dumps(toReturn)
| mit | 6,087,950,690,244,941,000 | 25.591398 | 90 | 0.601294 | false |
caronc/nzbget-subliminal | Subliminal/apprise/plugins/NotifyTwitter/tweepy/auth.py | 1 | 6706 | from __future__ import print_function
import six
import logging
from .error import TweepError
from .api import API
import requests
from requests_oauthlib import OAuth1Session, OAuth1
from requests.auth import AuthBase
from six.moves.urllib.parse import parse_qs
WARNING_MESSAGE = """Warning! Due to a Twitter API bug, signin_with_twitter
and access_type don't always play nice together. Details
https://dev.twitter.com/discussions/21281"""
class AuthHandler(object):
def apply_auth(self, url, method, headers, parameters):
"""Apply authentication headers to request"""
raise NotImplementedError
def get_username(self):
"""Return the username of the authenticated user"""
raise NotImplementedError
class OAuthHandler(AuthHandler):
"""OAuth authentication handler"""
OAUTH_HOST = 'api.twitter.com'
OAUTH_ROOT = '/oauth/'
def __init__(self, consumer_key, consumer_secret, callback=None):
if type(consumer_key) == six.text_type:
consumer_key = consumer_key.encode('ascii')
if type(consumer_secret) == six.text_type:
consumer_secret = consumer_secret.encode('ascii')
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.access_token = None
self.access_token_secret = None
self.callback = callback
self.username = None
self.oauth = OAuth1Session(consumer_key,
client_secret=consumer_secret,
callback_uri=self.callback)
def _get_oauth_url(self, endpoint):
return 'https://' + self.OAUTH_HOST + self.OAUTH_ROOT + endpoint
def apply_auth(self):
return OAuth1(self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
decoding=None)
def _get_request_token(self, access_type=None):
try:
url = self._get_oauth_url('request_token')
if access_type:
url += '?x_auth_access_type=%s' % access_type
return self.oauth.fetch_request_token(url)
except Exception as e:
raise TweepError(e)
def set_access_token(self, key, secret):
self.access_token = key
self.access_token_secret = secret
def get_authorization_url(self,
signin_with_twitter=False,
access_type=None):
"""Get the authorization URL to redirect the user"""
try:
if signin_with_twitter:
url = self._get_oauth_url('authenticate')
if access_type:
logging.warning(WARNING_MESSAGE)
else:
url = self._get_oauth_url('authorize')
self.request_token = self._get_request_token(access_type=access_type)
return self.oauth.authorization_url(url)
except Exception as e:
raise TweepError(e)
def get_access_token(self, verifier=None):
"""
After user has authorized the request token, get access token
with user supplied verifier.
"""
try:
url = self._get_oauth_url('access_token')
self.oauth = OAuth1Session(self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.request_token['oauth_token'],
resource_owner_secret=self.request_token['oauth_token_secret'],
verifier=verifier, callback_uri=self.callback)
resp = self.oauth.fetch_access_token(url)
self.access_token = resp['oauth_token']
self.access_token_secret = resp['oauth_token_secret']
return self.access_token, self.access_token_secret
except Exception as e:
raise TweepError(e)
def get_xauth_access_token(self, username, password):
"""
Get an access token from an username and password combination.
In order to get this working you need to create an app at
http://twitter.com/apps, after that send a mail to [email protected]
and request activation of xAuth for it.
"""
try:
url = self._get_oauth_url('access_token')
oauth = OAuth1(self.consumer_key,
client_secret=self.consumer_secret)
r = requests.post(url=url,
auth=oauth,
headers={'x_auth_mode': 'client_auth',
'x_auth_username': username,
'x_auth_password': password})
credentials = parse_qs(r.content)
return credentials.get('oauth_token')[0], credentials.get('oauth_token_secret')[0]
except Exception as e:
raise TweepError(e)
def get_username(self):
if self.username is None:
api = API(self)
user = api.verify_credentials()
if user:
self.username = user.screen_name
else:
raise TweepError('Unable to get username,'
' invalid oauth token!')
return self.username
class OAuth2Bearer(AuthBase):
def __init__(self, bearer_token):
self.bearer_token = bearer_token
def __call__(self, request):
request.headers['Authorization'] = 'Bearer ' + self.bearer_token
return request
class AppAuthHandler(AuthHandler):
"""Application-only authentication handler"""
OAUTH_HOST = 'api.twitter.com'
OAUTH_ROOT = '/oauth2/'
def __init__(self, consumer_key, consumer_secret):
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self._bearer_token = ''
resp = requests.post(self._get_oauth_url('token'),
auth=(self.consumer_key,
self.consumer_secret),
data={'grant_type': 'client_credentials'})
data = resp.json()
if data.get('token_type') != 'bearer':
raise TweepError('Expected token_type to equal "bearer", '
'but got %s instead' % data.get('token_type'))
self._bearer_token = data['access_token']
def _get_oauth_url(self, endpoint):
return 'https://' + self.OAUTH_HOST + self.OAUTH_ROOT + endpoint
def apply_auth(self):
return OAuth2Bearer(self._bearer_token)
| gpl-3.0 | -4,268,023,677,189,067,000 | 36.674157 | 102 | 0.570385 | false |
dreibh/planetlab-lxc-plcapi | PLC/ConfFiles.py | 1 | 5697 | #
# Functions for interacting with the conf_files table in the database
#
# Mark Huang <[email protected]>
# Copyright (C) 2006 The Trustees of Princeton University
#
from PLC.Faults import *
from PLC.Parameter import Parameter
from PLC.Filter import Filter
from PLC.Table import Row, Table
from PLC.Nodes import Node, Nodes
from PLC.NodeGroups import NodeGroup, NodeGroups
class ConfFile(Row):
"""
Representation of a row in the conf_files table. To use,
instantiate with a dict of values.
"""
table_name = 'conf_files'
primary_key = 'conf_file_id'
join_tables = ['conf_file_node', 'conf_file_nodegroup']
fields = {
'conf_file_id': Parameter(int, "Configuration file identifier"),
'enabled': Parameter(bool, "Configuration file is active"),
'source': Parameter(str, "Relative path on the boot server where file can be downloaded", max = 255),
'dest': Parameter(str, "Absolute path where file should be installed", max = 255),
'file_permissions': Parameter(str, "chmod(1) permissions", max = 20),
'file_owner': Parameter(str, "chown(1) owner", max = 50),
'file_group': Parameter(str, "chgrp(1) owner", max = 50),
'preinstall_cmd': Parameter(str, "Shell command to execute prior to installing", max = 1024, nullok = True),
'postinstall_cmd': Parameter(str, "Shell command to execute after installing", max = 1024, nullok = True),
'error_cmd': Parameter(str, "Shell command to execute if any error occurs", max = 1024, nullok = True),
'ignore_cmd_errors': Parameter(bool, "Install file anyway even if an error occurs"),
'always_update': Parameter(bool, "Always attempt to install file even if unchanged"),
'node_ids': Parameter(int, "List of nodes linked to this file"),
'nodegroup_ids': Parameter(int, "List of node groups linked to this file"),
}
def add_node(self, node, commit = True):
"""
Add configuration file to node.
"""
assert 'conf_file_id' in self
assert isinstance(node, Node)
assert 'node_id' in node
conf_file_id = self['conf_file_id']
node_id = node['node_id']
if node_id not in self['node_ids']:
self.api.db.do("INSERT INTO conf_file_node (conf_file_id, node_id)" \
" VALUES(%(conf_file_id)d, %(node_id)d)",
locals())
if commit:
self.api.db.commit()
self['node_ids'].append(node_id)
node['conf_file_ids'].append(conf_file_id)
def remove_node(self, node, commit = True):
"""
Remove configuration file from node.
"""
assert 'conf_file_id' in self
assert isinstance(node, Node)
assert 'node_id' in node
conf_file_id = self['conf_file_id']
node_id = node['node_id']
if node_id in self['node_ids']:
self.api.db.do("DELETE FROM conf_file_node" \
" WHERE conf_file_id = %(conf_file_id)d" \
" AND node_id = %(node_id)d",
locals())
if commit:
self.api.db.commit()
self['node_ids'].remove(node_id)
node['conf_file_ids'].remove(conf_file_id)
def add_nodegroup(self, nodegroup, commit = True):
"""
Add configuration file to node group.
"""
assert 'conf_file_id' in self
assert isinstance(nodegroup, NodeGroup)
assert 'nodegroup_id' in nodegroup
conf_file_id = self['conf_file_id']
nodegroup_id = nodegroup['nodegroup_id']
if nodegroup_id not in self['nodegroup_ids']:
self.api.db.do("INSERT INTO conf_file_nodegroup (conf_file_id, nodegroup_id)" \
" VALUES(%(conf_file_id)d, %(nodegroup_id)d)",
locals())
if commit:
self.api.db.commit()
self['nodegroup_ids'].append(nodegroup_id)
nodegroup['conf_file_ids'].append(conf_file_id)
def remove_nodegroup(self, nodegroup, commit = True):
"""
Remove configuration file from node group.
"""
assert 'conf_file_id' in self
assert isinstance(nodegroup, NodeGroup)
assert 'nodegroup_id' in nodegroup
conf_file_id = self['conf_file_id']
nodegroup_id = nodegroup['nodegroup_id']
if nodegroup_id in self['nodegroup_ids']:
self.api.db.do("DELETE FROM conf_file_nodegroup" \
" WHERE conf_file_id = %(conf_file_id)d" \
" AND nodegroup_id = %(nodegroup_id)d",
locals())
if commit:
self.api.db.commit()
self['nodegroup_ids'].remove(nodegroup_id)
nodegroup['conf_file_ids'].remove(conf_file_id)
class ConfFiles(Table):
"""
Representation of the conf_files table in the database.
"""
def __init__(self, api, conf_file_filter = None, columns = None):
Table.__init__(self, api, ConfFile, columns)
sql = "SELECT %s FROM view_conf_files WHERE True" % \
", ".join(self.columns)
if conf_file_filter is not None:
if isinstance(conf_file_filter, (list, tuple, set, int)):
conf_file_filter = Filter(ConfFile.fields, {'conf_file_id': conf_file_filter})
elif isinstance(conf_file_filter, dict):
conf_file_filter = Filter(ConfFile.fields, conf_file_filter)
sql += " AND (%s) %s" % conf_file_filter.sql(api)
self.selectall(sql)
| bsd-3-clause | 920,400,716,194,892,900 | 36.235294 | 116 | 0.574864 | false |
damomeen/pox-datapath | pox/openflow/nicira.py | 1 | 70497 | # Copyright 2012,2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# For lots of documentation, see Open vSwitch's nicira-ext.h and ofp-msgs.h
from pox.core import core
from pox.lib.util import initHelper
from pox.lib.util import hexdump
from pox.lib.addresses import parse_cidr, IPAddr, EthAddr
import pox.openflow.libopenflow_01 as of
from pox.openflow.libopenflow_01 import ofp_header, ofp_vendor_base
from pox.openflow.libopenflow_01 import _PAD, _PAD2, _PAD4, _PAD6
from pox.openflow.libopenflow_01 import _unpack, _read, _skip
import struct
# -----------------------------------------------------------------------
# OpenFlow Stuff
# -----------------------------------------------------------------------
# Technically, this stuff is part of OpenFlow 1.1+ and shouldn't be in
# this file. Since we don't have 1.1+ support yet, it's here at least
# temporarily.
OFPR_INVALID_TTL = 2 # Packet has invalid TTL
OFPC_INVALID_TTL_TO_CONTROLLER = 4
# -----------------------------------------------------------------------
# Nicira extensions
# -----------------------------------------------------------------------
NX_VENDOR_ID = 0x00002320
def _init_constants ():
actions = [
"NXAST_SNAT__OBSOLETE",
"NXAST_RESUBMIT",
"NXAST_SET_TUNNEL",
"NXAST_DROP_SPOOFED_ARP__OBSOLETE",
"NXAST_SET_QUEUE",
"NXAST_POP_QUEUE",
"NXAST_REG_MOVE",
"NXAST_REG_LOAD",
"NXAST_NOTE",
"NXAST_SET_TUNNEL64",
"NXAST_MULTIPATH",
"NXAST_AUTOPATH__DEPRECATED",
"NXAST_BUNDLE",
"NXAST_BUNDLE_LOAD",
"NXAST_RESUBMIT_TABLE",
"NXAST_OUTPUT_REG",
"NXAST_LEARN",
"NXAST_EXIT",
"NXAST_DEC_TTL",
"NXAST_FIN_TIMEOUT",
"NXAST_CONTROLLER",
"NXAST_DEC_TTL_CNT_IDS",
"NXAST_WRITE_METADATA",
"NXAST_PUSH_MPLS",
"NXAST_POP_MPLS",
"NXAST_SET_MPLS_TTL",
"NXAST_DEC_MPLS_TTL",
"NXAST_STACK_PUSH",
"NXAST_STACK_POP",
"NXAST_SAMPLE",
]
for i,name in enumerate(actions):
globals()[name] = i
_init_constants()
NXT_ROLE_REQUEST = 10
NXT_ROLE_REPLY = 11
NXT_SET_FLOW_FORMAT = 12
NXT_FLOW_MOD = 13
NXT_FLOW_MOD_TABLE_ID = 15
NXT_SET_PACKET_IN_FORMAT = 16
NXT_PACKET_IN = 17
NXT_FLOW_AGE = 18
NXT_SET_ASYNC_CONFIG = 19
NXT_SET_CONTROLLER_ID = 20
NXT_FLOW_MONITOR_CANCEL = 21
NXT_FLOW_MONITOR_PAUSED = 22
NXT_FLOW_MONITOR_RESUMED = 23
NXST_FLOW_MONITOR_REQUEST = 2
NXST_FLOW_MONITOR_REPLY = 2
#TODO: Replace with version in pox.lib?
def _issubclass (a, b):
try:
return issubclass(a, b)
except TypeError:
return False
class nicira_base (ofp_vendor_base):
"""
Base class for Nicira extensions
"""
_MIN_LENGTH = 16
vendor = NX_VENDOR_ID
#subtype = None # Set
def _eq (self, other):
"""
Return True if equal
    Override this.
"""
return True
def _init (self, kw):
"""
Initialize fields
    Override this.
"""
pass
def _pack_body (self):
"""
Pack body.
"""
return b""
def _unpack_body (self, raw, offset, avail):
"""
Unpack body in raw starting at offset.
Return new offset
"""
return offset
def _body_length (self):
"""
Return length of body.
Optionally override this.
"""
return len(self._pack_body())
def _show (self, prefix):
"""
Format additional fields as text
"""
return ""
def __init__ (self, **kw):
ofp_vendor_base.__init__(self)
self._init(kw)
assert hasattr(self, 'vendor')
assert hasattr(self, 'subtype')
initHelper(self, kw)
def pack (self):
assert self._assert()
packed = b""
packed += ofp_vendor_base.pack(self)
packed += struct.pack("!LL", self.vendor, self.subtype)
packed += self._pack_body()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset,(self.vendor,self.subtype) = _unpack("!LL", raw, offset)
offset = self._unpack_body(raw, offset, length-16)
return offset,length
def __len__ (self):
return 16 + self._body_length()
def __eq__ (self, other):
if type(self) != type(other): return False
if not ofp_vendor_base.__eq__(self, other): return False
if self.vendor != other.vendor: return False
if self.subtype != other.subtype: return False
return self._eq(other)
def __ne__ (self, other): return not self.__eq__(other)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_vendor_base.show(self, prefix + ' ')
outstr += prefix + 'vendor: ' + str(self.vendor) + '\n'
    outstr += prefix + 'subtype: ' + str(self.subtype) + '\n'
outstr += self._show(prefix)
return outstr
class nx_flow_mod_table_id (nicira_base):
"""
Used to enable the flow mod table ID extension
When this is enabled, a slightly altered ofp_flow_mod can be used
to set the table for a flow insertion. A convenient version of this
slightly altered flow_mod is available as ofp_flow_mod_table_id.
"""
subtype = NXT_FLOW_MOD_TABLE_ID
_MIN_LENGTH = 16 + 8
def _init (self, kw):
self.enable = True # Called "set" by OVS
def _eq (self, other):
"""
Return True if equal
    Override this.
"""
return self.enable == other.enable
def _pack_body (self):
"""
Pack body.
"""
return struct.pack("!B", 1 if self.enable else 0) + (of._PAD * 7)
def _unpack_body (self, raw, offset, avail):
"""
Unpack body in raw starting at offset.
Return new offset
"""
offset,(enable,) = of._unpack("!B", raw, offset)
offset = of._skip(raw, offset, 7)
self.enable = True if enable else False
return offset
def _body_length (self):
"""
Return length of body.
Optionally override this.
"""
return len(self._pack_body())
def _show (self, prefix):
"""
Format additional fields as text
"""
return prefix + "set: " + str(self.enable) + "\n"
class ofp_flow_mod_table_id (of.ofp_flow_mod):
"""
A subclass of ofp_flow_mod which has a table_id
This is for use with the NXT_FLOW_MOD_TABLE_ID extension.
"""
#TODO: It'd be nice if this were a cleaner subclass of the original,
# but it didn't really lend itself to subclassing.
def __init__ (self, **kw):
self.table_id = 0xff
of.ofp_flow_mod.__init__(self, **kw)
@property
def _command (self):
return chr(self.table_id) + chr(self.command)
@_command.setter
def _command (self, v):
self.table_id = ord(v[0])
self.command = ord(v[1])
# Unfortunately, there's no clean way to reuse a lot of the superclass,
# so we copy and paste... Gross.
# (Might be worth tweaking the superclass to make this cleaner.)
def pack (self):
"""
Packs this object into its wire format.
May normalize fields.
NOTE: If "data" has been specified, this method may actually return
*more than just a single ofp_flow_mod* in packed form.
Specifically, it may also have a barrier and an ofp_packet_out.
"""
po = None
if self.data:
#TODO: It'd be nice to log and then ignore if not data_is_complete.
# Unfortunately, we currently have no logging in here, so we
      #      assert instead which is either too drastic or too quiet.
assert self.data.is_complete
assert self.buffer_id is None
self.buffer_id = self.data.buffer_id
if self.buffer_id is None:
        po = of.ofp_packet_out(data=self.data)
        po.in_port = self.data.in_port
        po.actions.append(of.ofp_action_output(port = of.OFPP_TABLE))
# Should maybe check that packet hits the new entry...
# Or just duplicate the actions? (I think that's the best idea)
assert self._assert()
packed = b""
packed += ofp_header.pack(self)
packed += self.match.pack(flow_mod=True)
packed += struct.pack("!QHHHHLHH", self.cookie, self._command,
self.idle_timeout, self.hard_timeout,
self.priority, self._buffer_id, self.out_port,
self.flags)
for i in self.actions:
packed += i.pack()
if po:
      packed += of.ofp_barrier_request().pack()
packed += po.pack()
return packed
def unpack (self, raw, offset=0):
offset,length = self._unpack_header(raw, offset)
offset = self.match.unpack(raw, offset, flow_mod=True)
offset,(self.cookie, self._command, self.idle_timeout,
self.hard_timeout, self.priority, self._buffer_id,
self.out_port, self.flags) = \
_unpack("!QHHHHLHH", raw, offset)
    offset,self.actions = of._unpack_actions(raw,
        length-(32 + len(self.match)), offset)
assert length == len(self)
return offset,length
  def __eq__ (self, other):
    if not of.ofp_flow_mod.__eq__(self, other): return False
    if self.table_id != other.table_id: return False
    return True
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'match: \n'
outstr += self.match.show(prefix + ' ')
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
outstr += prefix + 'command: ' + str(self.command) + '\n'
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'idle_timeout: ' + str(self.idle_timeout) + '\n'
outstr += prefix + 'hard_timeout: ' + str(self.hard_timeout) + '\n'
outstr += prefix + 'priority: ' + str(self.priority) + '\n'
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'out_port: ' + str(self.out_port) + '\n'
outstr += prefix + 'flags: ' + str(self.flags) + '\n'
outstr += prefix + 'actions: \n'
for obj in self.actions:
outstr += obj.show(prefix + ' ')
return outstr
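# Sketch (assumes the extension has been enabled with nx_flow_mod_table_id
# above and that "connection" is a live connection object):
#
#   fm = ofp_flow_mod_table_id(table_id = 1)
#   fm.match.in_port = 3
#   fm.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
#   connection.send(fm)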
class nx_flow_mod (of.ofp_flow_mod, of.ofp_vendor_base):
"""
A flow mod command that uses Nicira extended matches
This has a table_id attribute, which only works if you have enabled
the nx_flow_mod_table_id option.
"""
_MIN_LENGTH = 32
header_type = of.OFPT_VENDOR
vendor = NX_VENDOR_ID
subtype = NXT_FLOW_MOD
def __init__ (self, **kw):
self.table_id = 0
of.ofp_flow_mod.__init__(self, **kw)
if 'match' not in kw:
# Superclass created an ofp_match -- replace it
self.match = nx_match()
def _validate (self):
if not isinstance(self.match, nx_match):
return "match is not class ofp_match"
return None
def pack (self):
"""
Packs this object into its wire format.
May normalize fields.
NOTE: If "data" has been specified, this method may actually return
*more than just a single ofp_flow_mod* in packed form.
Specifically, it may also have a barrier and an ofp_packet_out.
"""
po = None
if self.data:
#TODO: It'd be nice to log and then ignore if not data_is_complete.
# Unfortunately, we currently have no logging in here, so we
      #      assert instead which is either too drastic or too quiet.
assert self.data.is_complete
assert self.buffer_id is None
self.buffer_id = self.data.buffer_id
if self.buffer_id is None:
        po = of.ofp_packet_out(data=self.data)
        po.in_port = self.data.in_port
        po.actions.append(of.ofp_action_output(port = of.OFPP_TABLE))
# Should maybe check that packet hits the new entry...
# Or just duplicate the actions? (I think that's the best idea)
assert self._assert()
match = self.match.pack()
match_len = len(match)
command = self.command
command |= (self.table_id << 8)
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!LL", self.vendor, self.subtype)
packed += struct.pack("!QHHHHLHHH", self.cookie, command,
self.idle_timeout, self.hard_timeout,
self.priority, self._buffer_id, self.out_port,
self.flags, match_len)
packed += _PAD6
packed += match
packed += _PAD * ((match_len + 7)/8*8 - match_len)
for i in self.actions:
packed += i.pack()
if po:
      packed += of.ofp_barrier_request().pack()
packed += po.pack()
assert len(packed) == len(self)
return packed
def unpack (self, raw, offset=0):
_o = offset
offset,length = self._unpack_header(raw, offset)
offset,(vendor,subtype) = _unpack("!LL", raw, offset)
offset,(self.cookie, self.command, self.idle_timeout,
self.hard_timeout, self.priority, self._buffer_id,
self.out_port, self.flags, match_len) = \
_unpack("!QHHHHLHHH", raw, offset)
    offset = _skip(raw, offset, 6)
offset = self.match.unpack(raw, offset, match_len)
offset,self.actions = of._unpack_actions(raw,
length-(offset - _o), offset)
assert length == len(self)
return offset,length
def __len__ (self):
match_len = len(self.match)
l = 8 + 4 + 4
l += 8 + 2 + 2 + 2 + 2 + 4 + 2 + 2
l += 2 # match_len
l += 6 # pad
l += match_len
l += (match_len + 7)//8*8 - match_len
for i in self.actions:
l += len(i)
return l
# Packet_in formats
NXPIF_OPENFLOW10 = 0 # Standard OpenFlow 1.0 packet_in format
NXPIF_NXM = 1 # Nicira Extended packet_in format
class nx_packet_in_format (nicira_base):
subtype = NXT_SET_PACKET_IN_FORMAT
_MIN_LENGTH = 16 + 4
def _init (self, kw):
self.format = NXPIF_NXM # Extended packet_in format
def _eq (self, other):
"""
Return True if equal
    Override this.
"""
return self.format == other.format
def _pack_body (self):
"""
Pack body.
"""
return struct.pack("!I", self.format)
def _unpack_body (self, raw, offset, avail):
"""
Unpack body in raw starting at offset.
Return new offset
"""
offset,(self.format,) = of._unpack("!I", raw, offset)
return offset
def _show (self, prefix):
"""
Format additional fields as text
"""
s = prefix + "format: "
if self.format == NXPIF_NXM:
s += "NXM"
elif self.format == NXPIF_OPENFLOW10:
s += "OF1.0"
else:
s += str(self.format)
return s + "\n"
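# Sketch: switching a connection to the extended packet_in format.  The
# default "format" attribute is already NXPIF_NXM, so no arguments are
# needed; packet-ins from this switch then arrive in the NXM-based format.
#
#   connection.send(nx_packet_in_format())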
NX_ROLE_OTHER = 0
NX_ROLE_MASTER = 1
NX_ROLE_SLAVE = 2
class nx_role_request (nicira_base):
"""
Requests master/slave/other role type
Can initialize with role=NX_ROLE_x or with, e.g., master=True.
"""
subtype = NXT_ROLE_REQUEST
_MIN_LENGTH = 16 + 4
def _init (self, kw):
self.role = NX_ROLE_OTHER
if kw.pop("other", False):
self.role = NX_ROLE_OTHER
if kw.pop("master", False):
self.role = NX_ROLE_MASTER
if kw.pop("slave", False):
self.role = NX_ROLE_SLAVE
@property
def master (self):
return self.role == NX_ROLE_MASTER
@property
def slave (self):
return self.role == NX_ROLE_SLAVE
@property
def other (self):
return self.role == NX_ROLE_OTHER
def _eq (self, other):
"""
Return True if equal
    Override this.
"""
return self.role == other.role
def _pack_body (self):
"""
Pack body.
"""
return struct.pack("!I", self.role)
def _unpack_body (self, raw, offset, avail):
"""
Unpack body in raw starting at offset.
Return new offset
"""
offset,(self.role,) = of._unpack("!I", raw, offset)
return offset
def _show (self, prefix):
"""
Format additional fields as text
"""
s = prefix + "role: "
s += {NX_ROLE_OTHER:"other",NX_ROLE_MASTER:"master",
NX_ROLE_SLAVE:"slave"}.get(self.role, str(self.role))
return s + "\n"
class nx_role_reply (nx_role_request):
subtype = NXT_ROLE_REPLY
pass
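# Sketch: asking for the master role on this connection; the switch is
# expected to answer with an nx_role_reply carrying the granted role.
#
#   connection.send(nx_role_request(master = True))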
# -----------------------------------------------------------------------
# Actions
# -----------------------------------------------------------------------
class nx_output_reg (of.ofp_action_vendor_base):
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_OUTPUT_REG
self.offset = 0
self.nbits = None
self.reg = None # an nxm_entry class
self.max_len = 0
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.offset != other.offset: return False
if self.nbits != other.nbits: return False
if self.reg != other.reg: return False
if self.max_len != other.max_len: return False
return True
def _pack_body (self):
nbits = self.nbits - 1
assert nbits >= 0 and nbits <= 63
assert self.offset >= 0 and self.offset < (1 << 10)
ofs_nbits = self.offset << 6 | nbits
o = self.reg()
o._force_mask = False
reg = o.pack(omittable=False, header_only=True)
p = struct.pack('!HH4sH', self.subtype, ofs_nbits, reg, self.max_len)
p += _PAD6
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype, ofs_nbits, reg, self.max_len, _, _) = \
of._unpack('!HH4sHHI', raw, offset)
self.offset = ofs_nbits >> 6
self.nbits = (ofs_nbits & 0x3f) + 1
self.reg = _class_for_nxm_header(reg)
return offset
def _body_length (self):
return 16
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
s += prefix + ('offset: %s\n' % (self.offset,))
s += prefix + ('nbits: %s\n' % (self.nbits,))
s += prefix + ('reg: %s\n' % (self.reg,))
s += prefix + ('max_len: %s\n' % (self.max_len,))
return s
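# Sketch: an action that outputs the packet to whatever port number is
# currently stored in the low 16 bits of register 0 (the NXM_NX_REGx
# classes are defined further down in this module).  The result is
# appended to a flow_mod's actions list like any other action.
#
#   nx_output_reg(reg = NXM_NX_REG0, nbits = 16)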
class nx_reg_move (of.ofp_action_vendor_base):
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_REG_MOVE
self.nbits = None
self.dst = None # an nxm_entry class
self.dst_ofs = 0
self.src = None # an nxm_entry_class
self.src_ofs = 0
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.nbits != other.nbits: return False
if self.dst != other.dst: return False
if self.dst_ofs != other.dst_ofs: return False
if self.src != other.src: return False
if self.src_ofs != other.src_ofs: return False
return True
def _pack_body (self):
if self.nbits is None:
a = self.dst._nxm_length * 8 - self.dst_ofs
b = self.src._nxm_length * 8 - self.src_ofs
self.nbits = min(a,b)
o = self.dst()
o._force_mask = False
dst = o.pack(omittable=False, header_only=True)
o = self.src()
o._force_mask = False
src = o.pack(omittable=False, header_only=True)
p = struct.pack('!HHHH4s4s', self.subtype, self.nbits, self.src_ofs,
self.dst_ofs, src, dst)
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,self.nbits, self.src_ofs, self.dst_ofs, src, dst) = \
of._unpack('!HHHH4s4s', raw, offset)
self.dst = _class_for_nxm_header(dst)
self.src = _class_for_nxm_header(src)
return offset
def _body_length (self):
return 16
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
s += prefix + ('nbits: %s\n' % (self.nbits,))
s += prefix + ('src_ofs: %s\n' % (self.src_ofs,))
s += prefix + ('dst_ofs: %s\n' % (self.dst_ofs,))
s += prefix + ('src: %s\n' % (self.src,))
s += prefix + ('dst: %s\n' % (self.dst,))
return s
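# Sketch: an action that copies the 16-bit ingress port into the low bits
# of register 0.  nbits defaults to the smaller of the two fields, so it
# can be omitted here.
#
#   nx_reg_move(src = NXM_OF_IN_PORT, dst = NXM_NX_REG0)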
class nx_reg_load (of.ofp_action_vendor_base):
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_REG_LOAD
self.offset = 0
self.nbits = None
self.dst = None # an nxm_entry class
self.value = 0
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.offset != other.offset: return False
if self.nbits != other.nbits: return False
if self.dst != other.dst: return False
if self.value != other.value: return False
return True
def _pack_body (self):
if self.nbits is None:
self.nbits = self.dst._nxm_length * 8 - self.offset
nbits = self.nbits - 1
assert nbits >= 0 and nbits <= 63
assert self.offset >= 0 and self.offset < (1 << 10)
ofs_nbits = self.offset << 6 | nbits
o = self.dst()
o._force_mask = False
dst = o.pack(omittable=False, header_only=True)
p = struct.pack('!HH4sQ', self.subtype, ofs_nbits, dst, self.value)
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,ofs_nbits, dst, self.value) = \
of._unpack('!HH4sQ', raw, offset)
self.offset = ofs_nbits >> 6
self.nbits = (ofs_nbits & 0x3f) + 1
self.dst = _class_for_nxm_header(dst)
return offset
def _body_length (self):
return 16
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
s += prefix + ('offset: %s\n' % (self.offset,))
s += prefix + ('nbits: %s\n' % (self.nbits,))
s += prefix + ('dst: %s\n' % (self.dst,))
s += prefix + ('value: %s\n' % (self.value,))
return s
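# Sketch: an action that loads a constant into register 1; offset and
# nbits default to covering the whole destination field.
#
#   nx_reg_load(dst = NXM_NX_REG1, value = 0x2a)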
class nx_action_controller (of.ofp_action_vendor_base):
"""
Sends packet to controller
This is similar to an output to OFPP_CONTROLLER, but allows setting
the reason field and controller id to send to.
"""
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_CONTROLLER
self.max_len = 0xffFF
self.controller_id = 0
self.reason = of.OFPR_ACTION
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.max_len != other.max_len: return False
if self.controller_id != other.controller_id: return False
if self.reason != other.reason: return False
return True
def _pack_body (self):
p = struct.pack('!HHHB', self.subtype, self.max_len, self.controller_id,
self.reason)
p += of._PAD
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,self.max_len, self.controller_id, self.reason) = \
of._unpack('!HHHB', raw, offset)
offset = of._skip(raw, offset, 1)
return offset
def _body_length (self):
return 8
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
s += prefix + ('max_len: %s\n' % (self.max_len,))
s += prefix + ('controller_id: %s\n' % (self.controller_id,))
s += prefix + ('reason: %s\n' % (self.reason,))
return s
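# Sketch: an action that sends at most the first 128 bytes of the packet
# to controller ID 0, marking the packet-in as action-triggered.
#
#   nx_action_controller(max_len = 128, reason = of.OFPR_ACTION)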
class nx_action_resubmit (of.ofp_action_vendor_base):
"""
Used with both resubmit and resubmit_table.
Generally, you want to use one of the factory methods.
"""
@classmethod
def resubmit (cls, in_port = of.OFPP_IN_PORT):
return cls(subtype = NXAST_RESUBMIT, in_port = in_port, table = 0)
@classmethod
def resubmit_table (cls, table = 255, in_port = of.OFPP_IN_PORT):
return cls(subtype = NXAST_RESUBMIT_TABLE, in_port = in_port,
table = table)
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_RESUBMIT
self.in_port = None # New in_port for checking flow table
self.table = None # NXAST_RESUBMIT_TABLE: table to use
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.in_port != other.in_port: return False
if self.table != other.table: return False
return True
def _pack_body (self):
p = struct.pack('!HHB', self.subtype, self.in_port, self.table)
p += of._PAD3
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,self.in_port,self.table) = \
of._unpack('!HHB', raw, offset)
offset = of._skip(raw, offset, 3)
return offset
def _body_length (self):
return 8
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
s += prefix + ('in_port: %s\n' % (self.in_port,))
s += prefix + ('table: %s\n' % (self.table,))
return s
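# Sketch: the factory methods are the intended way to build this action;
# for example, to re-run the packet through table 1:
#
#   nx_action_resubmit.resubmit_table(table = 1)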
class nx_action_set_tunnel (of.ofp_action_vendor_base):
"""
Set a 32-bit tunnel ID
See also: nx_action_set_tunnel64
"""
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_SET_TUNNEL
self.tun_id = None # Must set
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.tun_id != other.tun_id: return False
return True
def _pack_body (self):
p = struct.pack('!HHI', self.subtype, 0, self.tun_id)
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,) = of._unpack('!H', raw, offset)
offset = of._skip(raw, offset, 2)
offset,(self.tun_id,) = of._unpack('!I', raw, offset)
return offset
def _body_length (self):
return 8
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
    s += prefix + ('tun_id: %s\n' % (self.tun_id,))
return s
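# Sketch: an action that tags packets with tunnel ID 0x10 before they are
# output to a tunnel port (nx_action_set_tunnel64 below handles IDs wider
# than 32 bits).
#
#   nx_action_set_tunnel(tun_id = 0x10)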
class nx_action_set_tunnel64 (of.ofp_action_vendor_base):
"""
Set a 64-bit tunnel ID
See also: nx_action_set_tunnel
"""
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_SET_TUNNEL64
self.tun_id = None # Must set
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.tun_id != other.tun_id: return False
return True
def _pack_body (self):
p = struct.pack('!HHIQ', self.subtype, 0, 0, self.tun_id)
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,) = of._unpack('!H', raw, offset)
offset = of._skip(raw, offset, 6)
offset,(self.tun_id,) = of._unpack('!Q', raw, offset)
return offset
def _body_length (self):
return 16
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
    s += prefix + ('tun_id: %s\n' % (self.tun_id,))
return s
class nx_action_fin_timeout (of.ofp_action_vendor_base):
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_FIN_TIMEOUT
self.fin_idle_timeout = 1 # New idle timeout, if nonzero.
self.fin_hard_timeout = 1 # New hard timeout, if nonzero.
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.fin_idle_timeout != other.fin_idle_timeout: return False
if self.fin_hard_timeout != other.fin_hard_timeout: return False
return True
def _pack_body (self):
p = struct.pack('!HHH', self.subtype, self.fin_idle_timeout,
self.fin_hard_timeout)
p += of._PAD2
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,self.fin_idle_timeout,self.fin_hard_timeout) = \
of._unpack('!HHH', raw, offset)
offset = of._skip(raw, offset, 2)
return offset
def _body_length (self):
return 8
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
s += prefix + ('fin_idle_timeout: %s\n' % (self.fin_idle_timeout,))
s += prefix + ('fin_hard_timeout: %s\n' % (self.fin_hard_timeout,))
return s
class nx_action_exit (of.ofp_action_vendor_base):
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_EXIT
def _eq (self, other):
if self.subtype != other.subtype: return False
return True
def _pack_body (self):
p = struct.pack('!H', self.subtype)
p += of._PAD6
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,) = \
of._unpack('!H', raw, offset)
offset = of._skip(raw, offset, 6)
return offset
def _body_length (self):
return 8
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
return s
class nx_action_dec_ttl (of.ofp_action_vendor_base):
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_DEC_TTL
def _eq (self, other):
if self.subtype != other.subtype: return False
return True
def _pack_body (self):
p = struct.pack('!H', self.subtype)
p += of._PAD6
return p
def _unpack_body (self, raw, offset, avail):
offset,(self.subtype,) = of._unpack('!H', raw, offset)
offset = of._skip(raw, offset, 6)
return offset
def _body_length (self):
return 8
def _show (self, prefix):
s = ''
s += prefix + ('subtype: %s\n' % (self.subtype,))
return s
# -----------------------------------------------------------------------
# Learn action
# -----------------------------------------------------------------------
class nx_action_learn (of.ofp_action_vendor_base):
"""
Allows table entries to add table entries
There are different ways of adding flow_mod_specs. For example, the
following are all equivalent:
learn = nx.nx_action_learn(table_id=1,hard_timeout=10)
fms = nx.flow_mod_spec.new # Just abbreviating this
learn.spec.append(fms( field=nx.NXM_OF_VLAN_TCI, n_bits=12 ))
learn.spec.append(fms( field=nx.NXM_OF_ETH_SRC, match=nx.NXM_OF_ETH_DST ))
learn.spec.append(fms( field=nx.NXM_OF_IN_PORT, output=True ))
learn = nx.nx_action_learn(table_id=1,hard_timeout=10)
learn.spec.chain(
field=nx.NXM_OF_VLAN_TCI, n_bits=12).chain(
field=nx.NXM_OF_ETH_SRC, match=nx.NXM_OF_ETH_DST).chain(
field=nx.NXM_OF_IN_PORT, output=True)
learn = nx.nx_action_learn(table_id=1,hard_timeout=10)
learn.spec = [
nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_VLAN_TCI),
n_bits=12),
nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_ETH_SRC),
dst=nx.nx_learn_dst_match(nx.NXM_OF_ETH_DST)),
nx.flow_mod_spec(src=nx.nx_learn_src_field(nx.NXM_OF_IN_PORT),
dst=nx.nx_learn_dst_output())
]
"""
def _init (self, kw):
self.vendor = NX_VENDOR_ID
self.subtype = NXAST_LEARN
self.idle_timeout = 0
self.hard_timeout = 0
self.priority = of.OFP_DEFAULT_PRIORITY
self.cookie = 0
self.flags = 0
self.table_id = 0
self.fin_idle_timeout = 0
self.fin_hard_timeout = 0
self.spec = flow_mod_spec_chain()
@property
def table (self):
"""
Synonym for table_id
"""
return self.table_id
@table.setter
def table (self, value):
self.table_id = value
def _eq (self, other):
if self.subtype != other.subtype: return False
if self.idle_timeout != other.idle_timeout: return False
if self.hard_timeout != other.hard_timeout: return False
if self.priority != other.priority: return False
if self.cookie != other.cookie: return False
if self.flags != other.flags: return False
if self.table_id != other.table_id: return False
if self.fin_idle_timeout != other.fin_idle_timeout: return False
if self.fin_hard_timeout != other.fin_hard_timeout: return False
return True
def _pack_body (self):
p = struct.pack('!HHHHQHBBHH',
self.subtype,
self.idle_timeout,
self.hard_timeout,
self.priority,
self.cookie,
self.flags,
self.table_id,
0,
self.fin_idle_timeout,
self.fin_hard_timeout)
for fs in self.spec:
p += fs.pack()
if len(p) % 8:
p += '\x00' * (8-(len(p)%8))
return p
def _unpack_body (self, raw, offset, avail):
orig_offset = offset
offset,(self.subtype, self.idle_timeout, self.hard_timeout,
self.priority, self.cookie, self.flags, self.table_id, _,
self.fin_idle_timeout,
self.fin_hard_timeout) = of._unpack('!HHHHQHBBHH', raw, offset)
avail -= (2+2+2+2+8+2+1+1+2+2)
assert (avail & 1) == 0
while avail > 0:
newoff, fms = flow_mod_spec.unpack_new(raw, offset)
if fms is None: break
self.spec.append(fms)
avail -= (newoff - offset)
offset = newoff
length = offset - orig_offset
if length % 8:
offset = of._skip(raw, offset, 8 - (length%8))
return offset
def _show (self, prefix):
s = ''
ff = ('idle_timeout hard_timeout priority cookie flags table_id '
'fin_idle_timeout fin_hard_timeout').split()
for f in ff:
s += prefix
s += f + ": "
s += str(getattr(self, f))
s += "\n"
return s
NX_LEARN_SRC_FIELD = 0
NX_LEARN_SRC_IMMEDIATE = 1
NX_LEARN_DST_MATCH = 0
NX_LEARN_DST_LOAD = 1
NX_LEARN_DST_OUTPUT = 2
class nx_learn_spec (object):
_is_src = False
_is_dst = False
data = None
n_bits = None
value = None
def pack (self):
return self.data if self.data else b''
@classmethod
def unpack_subclass (cls, spec, n_bits, raw, offset):
"""
Returns (new_offset, object)
"""
assert cls is not nx_learn_spec, "Must call on subclass"
c = _flow_mod_spec_to_class(cls._is_src, spec)
offset,o = c.unpack_new(n_bits, raw, offset)
return offset, o
@classmethod
def unpack_new (cls, n_bits, raw, offset):
"""
Returns (new_offset, object)
"""
o = cls.__new__(cls)
o.n_bits = n_bits
datalen = len(o)
if datalen != 0:
offset,o.data = of._read(raw, offset, datalen)
return offset,o
def __len__ (self):
# Implement. Can't use .data field.
assert False, "__len__ unimplemented in " + type(self).__name__
def __repr__ (self):
return "<%s n_bits:%s>" % (type(self).__name__, self.n_bits)
class nx_learn_spec_src (nx_learn_spec):
_is_src = True
class nx_learn_spec_dst (nx_learn_spec):
_is_dst = True
class _field_and_match (object):
"""
Common functionality for src_field and dst_match
"""
def __init__ (self, field, ofs = 0, n_bits = None):
#if type(field) is type: field = field()
data = field().pack(omittable = False, header_only = True)
data += struct.pack("!H", ofs)
if n_bits is None:
n_bits = field._nxm_length * 8 - ofs
elif n_bits < 0:
n_bits = field._nxm_length * 8 - ofs - n_bits
self.n_bits = n_bits
self.data = data
@property
def ofs (self):
return struct.unpack_from("!H", self.data, 4)[0]
@property
def field (self):
    t,has_mask,length = nxm_entry.unpack_header(self.data, 0)
c = _nxm_type_to_class.get(t)
if c is None:
attrs = {'_nxm_type':t}
attrs['_nxm_length'] = length/2 if has_mask else length
c = type('nxm_type_'+str(t), (NXM_GENERIC,), attrs)
return c
def __len__ (self):
return 6
class nx_learn_src_field (_field_and_match, nx_learn_spec_src):
value = NX_LEARN_SRC_FIELD
@property
def matching (self):
"""
Returns a corresponding nx_learn_dst_match
"""
return nx_learn_dst_match(self.field, self.ofs, self.n_bits)
class nx_learn_src_immediate (nx_learn_spec_src):
"""
An immediate value for a flow spec
  It is generally a good idea to use one of the factory methods, e.g., u8().
"""
value = NX_LEARN_SRC_IMMEDIATE
def __init__ (self, data, n_bits = None):
if n_bits is None:
assert (len(data)&1) == 0, "data needs pad; n_bits cannot be inferred"
n_bits = len(data)*8
else:
assert len(data)*8 >= n_bits, "n_bits larger than data"
self.n_bits = n_bits
self.data = data
@classmethod
def u8 (cls, dst, value):
return cls(struct.pack("!H", value))
@classmethod
def u16 (cls, dst, value):
return cls(struct.pack("!H", value))
@classmethod
def u32 (cls, dst, value):
return cls(struct.pack("!L", value))
def __len__ (self):
return ((self.n_bits+15) // 16) * 2
class nx_learn_dst_match (_field_and_match, nx_learn_spec_dst):
value = NX_LEARN_DST_MATCH
class nx_learn_dst_load (nx_learn_spec_dst):
value = NX_LEARN_DST_LOAD
def __init__ (self, field, ofs = 0, n_bits = None):
data = field().pack(omittable = False, header_only = True)
data += struct.pack("!H", ofs)
if n_bits is None:
n_bits = field._nxm_length * 8 - ofs
elif n_bits < 0:
n_bits = field._nxm_length * 8 - ofs - n_bits
self.n_bits = n_bits
self.data = data
def __len__ (self):
return ((self.n_bits+15) // 16) * 2
class nx_learn_dst_output (nx_learn_spec_dst):
value = NX_LEARN_DST_OUTPUT
def __init__ (self, dummy = True):
assert dummy is True
super(nx_learn_dst_output,self).__init__()
def __len__ (self):
return 0
def _flow_mod_spec_to_class (is_src, val):
#TODO: Use a class registry and decorator for these instead of this hack
if is_src:
d = {
NX_LEARN_SRC_FIELD: nx_learn_src_field,
NX_LEARN_SRC_IMMEDIATE: nx_learn_src_immediate,
}
else:
d = {
NX_LEARN_DST_MATCH: nx_learn_dst_match,
NX_LEARN_DST_LOAD: nx_learn_dst_load,
NX_LEARN_DST_OUTPUT: nx_learn_dst_output,
}
return d.get(val)
class flow_mod_spec_chain (list):
def chain (self, *args, **kw):
self.append(flow_mod_spec.new(*args,**kw))
return self
#class _meta_fms (type):
# @property
# def chain (self):
# return _flow_mod_spec_chain()
class flow_mod_spec (object):
# __metaclass__ = _meta_fms
@classmethod
def create (cls, src, dst = None, n_bits = None):
#TODO: Remove me
return cls(src, dst, n_bits)
def __init__ (self, src, dst = None, n_bits = None):
assert src._is_src
if dst is None:
# Assume same as src
assert type(src) == nx_learn_src_field
dst = src.matching
assert dst._is_dst
#TODO: Check whether there's enough space in dst
# (This will require figuring out what the right length for output is...
# 16 bits?)
if n_bits is None:
n_bits = src.n_bits
if n_bits is None:
n_bits = dst.n_bits
else:
if dst.n_bits is not None and dst.n_bits > n_bits:
raise RuntimeError("dst n_bits greater than source n_bits "
"(%s and %s); cannot infer" % (n_bits,dst.n_bits))
if n_bits is None:
raise RuntimeError("cannot infer n_bits")
#o = cls.__new__(cls)
#o.src = src
#o.dst = dst
#o.n_bits = n_bits
#return o
#return cls(src, dst, n_bits)
self.src = src
self.dst = dst
self.n_bits = n_bits
def __repr__ (self):
return "%s(src=%s, dst=%s, n_bits=%s)" % (
type(self).__name__, self.src, self.dst, self.n_bits)
# @staticmethod
# def chain ():
# return _flow_mod_spec_chain()
@classmethod
def new (cls, src=None, dst=None, **kw):
if src is not None: kw['src'] = src
if dst is not None: kw['dst'] = dst
src = None
dst = None
srcarg = ()
dstarg = ()
srckw = {}
dstkw = {}
src_inst = None
dst_inst = None
n_bits = None
for k,v in kw.iteritems():
# This is handy, though there's potentially future ambiguity
s = globals().get('nx_learn_' + k)
if not s:
s = globals().get('nx_learn_src_' + k)
if not s:
s = globals().get('nx_learn_dst_' + k)
if not s:
if k.startswith("src_"):
srckw[k[4:]] = v
elif k.startswith("dst_"):
dstkw[k[4:]] = v
elif k == "src":
assert isinstance(v, nx_learn_spec_src)
src_inst = v
elif k == "dst":
assert isinstance(v, nx_learn_spec_dst)
dst_inst = v
elif k == "n_bits":
n_bits = v
else:
raise RuntimeError("Don't know what to do with '%s'", (k,))
continue
if s._is_src:
assert src is None, "src already set"
src = s
srcarg = (v,)
if s._is_dst:
assert dst is None, "dst already set"
dst = s
dstarg = (v,)
if src_inst:
assert src is None, "can't set src and a spec type"
assert len(srckw) == 0, "can't set src params with src instance"
else:
assert src is not None, "no src set"
src_inst = src(*srcarg,**srckw)
if dst_inst:
assert dst is None, "can't set dst and a spec type"
assert len(dstkw) == 0, "can't set dst params with dst instance"
else:
if dst is not None: dst_inst = dst(*dstarg,**dstkw)
return cls.create(src_inst, dst_inst, n_bits)
chain = new
#def __init__ (self, src=None, dst=None, n_bits=0):
# self.src = src
# self.dst = dst
# self.n_bits = n_bits
def pack (self):
assert isinstance(self.src, nx_learn_spec_src),str(self.src)
assert isinstance(self.dst, nx_learn_spec_dst),str(self.dst)
assert self.n_bits < 1024
v = self.src.value << 13 | self.dst.value << 11 | self.n_bits
p = struct.pack("!H", v)
p += self.src.pack() + self.dst.pack()
return p
@classmethod
def unpack_new (cls, raw, offset = 0):
"""
May return a None object if it's padding
"""
offset,(v,) = of._unpack("!H", raw, offset)
if v == 0:
# Special case for padding
return offset, None
n_bits = v & 1023
offset,src = nx_learn_spec_src.unpack_subclass((v >> 13) & 1,
n_bits, raw, offset)
offset,dst = nx_learn_spec_dst.unpack_subclass((v >> 11) & 3,
n_bits, raw, offset)
return offset, cls(src, dst, n_bits)
# -----------------------------------------------------------------------
# NXM support
# -----------------------------------------------------------------------
#def conv (n, s):
# if s == 0: return b''
# nn = struct.pack("B", n & 0xff)
# n >>= 8
# return conv(n, s - 1) + nn
class _nxm_raw (object):
def _pack_value (self, v):
return v
def _unpack_value (self, v):
return v
class _nxm_numeric (object):
_size_table = [None, "!B", "!H", None, "!L", None, None, None, "!Q"]
def _pack_value (self, v):
size = self._size_table[self._nxm_length]
return struct.pack(size, v)
def _unpack_value (self, v):
try:
size = self._size_table[self._nxm_length]
return struct.unpack(size, v)[0]
except:
raise RuntimeError("Can't unpack %i bytes for %s"
% (self._nxm_length, self.__class__.__name__))
class _nxm_ip (object):
"""
Allows setting of IP address in many formats
The value can be any format known by IPAddr. If it's a string, it can
also have a trailing /netmask or /cidr-bits. If it's a tuple, the
first is assumed to be any kind of IP address and the second is either
a netmask or the number of network bits.
"""
@property
def value (self):
return self._unpack_value(self._value)
@value.setter
def value (self, value):
if isinstance(value, tuple) or isinstance(value, list):
assert len(value) == 2
ip = value[0]
self.mask = value[1]
elif isinstance(value, basestring) and len(value)>4 and '/' in value:
temp = parse_cidr(value, infer=False)
ip = temp[0]
self.mask = 32 if temp[1] is None else temp[1]
else:
ip = value
self._value = self._pack_value(ip)
def _pack_value (self, v):
return IPAddr(v, networkOrder=False).toRaw()
def _unpack_value (self, v):
return IPAddr(v, networkOrder=True)
def _pack_mask (self, v):
if isinstance(v, (int, long)):
# Assume CIDR
if v > 32: v = 32
elif v < 0: v = 0
n = (0xffFFffFF << (32-v)) & 0xffFFffFF
      return IPAddr(n, networkOrder=False).toRaw()
else:
return IPAddr(v).toRaw()
#def _unpack_mask (self, v):
# # Special unpacking for CIDR-style?
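# Sketch: because of the mixin above, the IP-based entries defined later
# (e.g. NXM_OF_IP_SRC) accept a plain address, a CIDR string, or an
# (address, prefix-length) pair:
#
#   NXM_OF_IP_SRC("10.0.0.1")
#   NXM_OF_IP_SRC("10.0.0.0/24")
#   NXM_OF_IP_SRC(("10.0.0.0", 24))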
class _nxm_ipv6 (object):
"""
Placeholder until we have real IPv6 support
Allows setting of IP address in many formats
The value can be any format known by IPAddr. If it's a string, it can
also have a trailing /netmask or /cidr-bits. If it's a tuple, the
first is assumed to be any kind of IP address and the second is either
a netmask or the number of network bits.
"""
#TODO: Fix this when IPv6 is available
@property
def value (self):
return self._unpack_value(self._value)
@value.setter
def value (self, value):
if isinstance(value, tuple) or isinstance(value, list):
assert len(value) == 2
ip = value[0]
self.mask = value[1]
#TODO
#elif isinstance(value, unicode) and u'/' in value:
# temp = parse_cidr6(value, infer=False)
# ip = temp[0]
# self.mask = 128 if temp[1] is None else temp[1]
else:
ip = value
    self._value = self._pack_value(ip)
def _pack_value (self, v):
return v
#return IPAddr6(v).raw
def _unpack_value (self, v):
return v
#return IPAddr6(v, raw=True)
def _pack_mask (self, v):
return v
#if isinstance(v, long):
# # Assume CIDR
# if v > 128: v = 128
# elif v < 0: v = 0
# n = (0xffFFffFF << (32-v)) & 0xffFFffFF
# return IPAddr6(v, networkOrder=False).toRaw()
#else:
# #return IPAddr6(v).raw
#def _unpack_mask (self, v):
# # Special unpacking for CIDR-style?
class _nxm_ether (object):
def _pack_value (self, v):
return EthAddr(v).toRaw()
def _unpack_value (self, v):
return EthAddr(v)
_nxm_type_to_class = {}
_nxm_name_to_type = {}
class nxm_entry (object):
#_nxm_type = _make_type(0x, )
#_nxm_length = # bytes of data not including mask (double for mask)
_force_mask = False
#TODO: make mask-omittable a class-level attribute?
@property
def nxm_vendor (self):
return self._nxm_type >> 7
@property
def nxm_field (self):
return self._nxm_type & 0x7f
@staticmethod
def unpack_header (raw, offset):
"""
Parses the NXM_HEADER
Returns (type,has_mask,length)
"""
h, = struct.unpack_from("!L", raw, offset)
offset += 4
t = h >> 9
has_mask = (h & (1<<8)) != 0
    length = h & 0xff
return t,has_mask,length
@staticmethod
def unpack_new (raw, offset):
t,has_mask,length = nxm_entry.unpack_header(raw, offset)
offset += 4
offset,data = of._read(raw, offset, length)
mask = None
if has_mask:
assert not (length & 1), "Odd length with mask"
mask = data[length/2:]
data = data[:length/2]
#NOTE: Should use _class_for_nxm_header?
c = _nxm_type_to_class.get(t)
if c is None:
#TODO: Refactor with learn spec field property?
e = NXM_GENERIC()
e._nxm_length = length
if has_mask:
e._nxm_length /= 2
e._nxm_type = t
# Alternate approach: Generate new subclass. To do: cache gen'd types?
#attrs = {'_nxm_type':t}
#attrs['_nxm_length'] = length/2 if has_mask else length
#c = type('nxm_type_'+str(t), (NXM_GENERIC,), attrs)
#e = c()
else:
e = c()
assert data is not None
assert len(data) == e._nxm_length, "%s != %s" % (len(data), e._nxm_length)
assert mask is None or len(mask) == e._nxm_length
e._value = data
e._mask = mask
if mask is not None:
e._force_mask = True
return offset, e
def clone (self):
n = self.__class__()
n._nxm_type = self._nxm_type
n._nxm_length = self._nxm_length
n._force_mask = self._force_mask
n.mask = self.mask
n.value = self.value
return n
def __init__ (self, value = None, mask = None):
super(nxm_entry, self).__init__()
self._value = None
self._mask = None
if value is None and mask is None: return # Sloppy
self.mask = mask
self.value = value # In case value overrides mask (IP), do value last
def get_length (self, omittable = False):
# Calculating length is slightly tricky with mask omission, etc.,
# so just pack it and find out, rather than duplicate the logic
# here.
return len(self.pack(omittable))
def __len__ (self):
return self.get_length()
def _unpack_mask (self, m):
return self._unpack_value(m)
def _pack_mask (self, m):
return self._pack_value(m)
@property
def is_reg (self):
return False
@property
def allow_mask (self):
return False
@property
def value (self):
return self._unpack_value(self._value)
@value.setter
def value (self, value):
self._value = self._pack_value(value)
@property
def mask (self):
if self._mask is None: return None
return self._unpack_mask(self._mask)
@mask.setter
def mask (self, value):
if self.allow_mask is False:
if value is not None:
raise RuntimeError("entry has no mask")
if value is None:
# This would normally be up to the pack function, but we add it
# here as a special case
self._mask = None
else:
self._mask = self._pack_mask(value)
def __eq__ (self, other):
if type(self) != type(other): return False
if self._nxm_type != other._nxm_type: return False
if self.value != other.value: return False
if self.mask != other.mask: return False
if self.is_reg != other.is_reg: return False
return True
def pack (self, omittable = False, header_only = False):
h = self._nxm_type << 9
mask = self._mask
if mask is not None:
assert len(mask) == self._nxm_length, "mask is wrong length"
if (mask.count("\x00") == self._nxm_length) and omittable:
return b''
if (mask.count("\xff") == self._nxm_length):
mask = None
if mask is None and self._force_mask:
mask = "\xff" * self._nxm_length
if mask is not None:
h |= (1 << 8)
h |= (self._nxm_length * 2)
else:
h |= self._nxm_length
r = struct.pack("!L", h)
if header_only: return r
value = self._value
assert value is not None
assert len(value) == self._nxm_length, "value is wrong length"
r += value
if mask is not None:
assert 0 == sum(ord(v)&(0xff&~ord(m)) for v,m in zip(value,mask)), \
"nonzero masked bits"
r += mask
return r
def __str__ (self):
r = self.__class__.__name__ + "(" + str(self.value)
if self.mask is not None:
if self.mask != ("\xff" * self._nxm_length):
r += "/" + str(self.mask)
#if self.is_reg: r += "[r]"
return r + ")"
def __repr__ (self):
return str(self)
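# Sketch: maskable entries take an optional mask as the second
# constructor argument; for example, matching only the low 12 bits of
# the VLAN TCI field:
#
#   e = NXM_OF_VLAN_TCI(0x0064, 0x0fff)
#   raw = e.pack()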
class _nxm_numeric_entry (_nxm_numeric, nxm_entry):
pass
class _nxm_maskable (object):
@property
def allow_mask (self):
return True
class _nxm_maskable_numeric_entry (_nxm_maskable, _nxm_numeric_entry):
pass
class _nxm_reg (_nxm_maskable_numeric_entry):
@property
def is_reg (self):
return True
class NXM_GENERIC (_nxm_raw, nxm_entry):
@property
def allow_mask (self):
return True
def __str__ (self):
r = "NXM_%08x_%i" % (self.nxm_vendor, self.nxm_field)
r += "("
r += "".join("%02x" % (ord(x),) for x in self.value)
#+ repr(self.value)
if self.mask is not None:
if self.mask != ("\xff" * self._nxm_length):
r += "/" + repr(self.mask)
return r + ")"
def _make_type (vendor, field):
"""
Takes an NXM vendor and field and returns the whole type field
"""
return (vendor << 7) | field
def _fix_types (t):
"""
Helper for _make_nxm(_w)
Normalizes lists of superclasses
"""
try:
_ = t[0]
t = list(t)
except:
t = [t]
ok = False
for tt in t:
if _issubclass(tt, nxm_entry):
ok = True
break
if not ok:
t.append(nxm_entry)
#t = tuple(t)
return t
def _make_nxm (__name, __vendor, __field, __len = None, type = None,
**kw):
"""
Make a simple NXM entry class
"""
if type is None:
type = (_nxm_numeric_entry,)
else:
type = _fix_types(type)
t = _make_type(__vendor, __field)
kw['_nxm_type'] = t
if __len is not None: kw['_nxm_length'] = __len
import __builtin__
typ = __builtin__.type
c = typ(__name, tuple(type), kw)
_nxm_type_to_class[t] = c
_nxm_name_to_type[__name] = t
assert __name not in globals()
globals()[__name] = c
return c
def _make_nxm_w (*args, **kw):
"""
Make a simple wildcarded NXM entry class
"""
t = _fix_types(kw.pop('type', _nxm_maskable_numeric_entry))
ok = False
for tt in t:
if _issubclass(tt, _nxm_maskable):
ok = True
break
if not ok:
t.insert(0, _nxm_maskable)
return _make_nxm(*args, type=t, **kw)
def _class_for_nxm_header (raw):
"""
Given a raw nxm_entry header, return corresponding class
If we don't have a class for this header type, we generate one.
"""
t,has_mask,length = nxm_entry.unpack_header(raw, 0)
c = _nxm_type_to_class.get(t)
if c: return c
# Need to generate a new nxm_entry type.
# This code is totally untested.
vendor = (t >> 7) & 0xffff
field = t & 0x7f
typename = "NXM_UNKNOWN_"
typename += "%04x_%02x" % (vendor,field)
if has_mask: typename += "_MASKABLE"
types = [_nxm_raw]
if has_mask:
types.append(_nxm_maskable)
return _make_nxm(typename, vendor, field, length, types)
# -----------------------------------------------------------------------
# OpenFlow 1.0-compatible nxm_entries
# -----------------------------------------------------------------------
_make_nxm("NXM_OF_IN_PORT", 0, 0, 2)
_make_nxm_w("NXM_OF_ETH_DST", 0, 1, 6, type=_nxm_ether)
_make_nxm_w("NXM_OF_ETH_SRC", 0, 2, 6, type=_nxm_ether)
# Packet ethertype
_make_nxm("NXM_OF_ETH_TYPE", 0, 3, 2)
_make_nxm_w("NXM_OF_VLAN_TCI", 0, 4, 2)
_make_nxm_w("NXM_OF_IP_TOS", 0, 5, 1)
_make_nxm_w("NXM_OF_IP_PROTO", 0, 6, 1)
_make_nxm_w("NXM_OF_IP_SRC", 0, 7, 4, type=_nxm_ip)
_make_nxm_w("NXM_OF_IP_DST", 0, 8, 4, type=_nxm_ip)
# Maskable in OVS 1.6+
_make_nxm_w("NXM_OF_TCP_SRC", 0, 9, 2)
_make_nxm_w("NXM_OF_TCP_DST", 0, 10, 2)
# Maskable in OVS 1.6+
_make_nxm_w("NXM_OF_UDP_SRC", 0, 11, 2)
_make_nxm_w("NXM_OF_UDP_DST", 0, 12, 2)
_make_nxm("NXM_OF_ICMP_TYPE", 0, 13, 1)
_make_nxm("NXM_OF_ICMP_CODE", 0, 14, 1)
_make_nxm("NXM_OF_ARP_OP", 0, 15, 2)
# The IP address in an ethernet+IP ARP packet
# Fully maskable in OVS 1.8+, only CIDR-compatible masks before that
_make_nxm_w("NXM_OF_ARP_SPA", 0, 16, 4, type=_nxm_ip)
_make_nxm_w("NXM_OF_ARP_TPA", 0, 17, 4, type=_nxm_ip)
# -----------------------------------------------------------------------
# Nicira register nxm_entries
# -----------------------------------------------------------------------
NXM_NX_MAX_REGS = 16
# Array with all the register entries indexed by their number
# (they are also available as NXM_NX_REG0, etc.)
NXM_NX_REG = []
def _init_regs ():
for i in range(0, NXM_NX_MAX_REGS):
assert len(NXM_NX_REG) == i
n = "NXM_NX_REG" + str(i)
r = _make_nxm_w(n, 1, i, 4, type=_nxm_reg)
NXM_NX_REG.append(r)
globals()[n] = r
_init_regs()
def NXM_IS_NX_REG (o):
"""
Simulates macro from OVS
"""
return o.is_reg
# -----------------------------------------------------------------------
# Nicira nxm_entries
# -----------------------------------------------------------------------
# Tunnel properties
_make_nxm_w("NXM_NX_TUN_ID", 1, 16, 8)
_make_nxm_w("NXM_NX_TUN_IPV4_SRC", 1, 31, 4, type=_nxm_ip)
_make_nxm_w("NXM_NX_TUN_IPV4_DST", 1, 32, 4, type=_nxm_ip)
# The ethernet address in an ethernet+IP ARP packet
_make_nxm("NXM_NX_ARP_SHA", 1, 17, 6, type=_nxm_ether)
_make_nxm("NXM_NX_ARP_THA", 1, 18, 6, type=_nxm_ether)
# Fully maskable in OVS 1.8+, only CIDR-compatible masks before that
_make_nxm_w("NXM_NX_IPV6_SRC", 1, 19, 16, type=_nxm_ipv6)
_make_nxm_w("NXM_NX_IPV6_DST", 1, 20, 16, type=_nxm_ipv6)
_make_nxm("NXM_NX_ICMPV6_TYPE", 1, 21, 1)
_make_nxm("NXM_NX_ICMPV6_CODE", 1, 22, 1)
# IPv6 Neighbor Discovery target address
_make_nxm_w("NXM_NX_ND_TARGET", 1, 23, 16, type=_nxm_ipv6)
# IPv6 Neighbor Discovery source link-layer address
_make_nxm("NXM_NX_ND_SLL", 1, 24, 6, type=_nxm_ether)
# IPv6 Neighbor Discovery target link-layer address
_make_nxm("NXM_NX_ND_TLL", 1, 25, 6, type=_nxm_ether)
# Bits for NXM_NX_IP_FRAG
NX_IP_FRAG_ANY = 1 # It's the first/only fragment
NX_IP_FRAG_LATER = 3 # It's not the first fragment
# IP fragment information
#TODO: A custom type or types would make this nicer to use.
# For now, use with above flags.
_make_nxm_w("NXM_NX_IP_FRAG", 1, 26, 1)
# IPv6 flow label
_make_nxm("NXM_NX_IPV6_LABEL", 1, 27, 4)
# IP ECN bits
_make_nxm("NXM_NX_IP_ECN", 1, 28, 1)
_make_nxm("NXM_NX_IP_TTL", 1, 29, 1)
# Flow cookie
_make_nxm_w("NXM_NX_COOKIE", 1, 30, 8)
#@vendor_s_message('NXT_SET_ASYNC_CONFIG', 19)
class nx_async_config (nicira_base):
subtype = NXT_SET_ASYNC_CONFIG
_MIN_LENGTH = 40
def _init (self, kw):
# For master or other role
self.packet_in_mask = 0
self.port_status_mask = 0
self.flow_removed_mask = 0
# For slave role
self.packet_in_mask_slave = 0
self.port_status_mask_slave = 0
self.flow_removed_mask_slave = 0
def set_packet_in (self, bit, master=True, slave=True):
if master: self.packet_in_mask |= bit
if slave: self.packet_in_mask_slave |= bit
def set_port_status (self, bit, master=True, slave=True):
if master: self.port_status_mask |= bit
if slave: self.port_status_mask_slave |= bit
def set_flow_removed (self, bit, master=True, slave=True):
    if master: self.flow_removed_mask |= bit
if slave: self.flow_removed_mask_slave |= bit
def _eq (self, other):
"""
Return True if equal
Overide this.
"""
for a in "packet_in port_status flow_removed".split():
a += "_mask"
if getattr(self, a) != getattr(other, a): return False
a += "_slave"
if getattr(self, a) != getattr(other, a): return False
return True
def _pack_body (self):
return struct.pack("!IIIIII",
self.packet_in_mask, self.packet_in_mask_slave,
self.port_status_mask, self.port_status_mask_slave,
self.flow_removed_mask, self.flow_removed_mask_slave)
def _unpack_body (self, raw, offset, avail):
"""
Unpack body in raw starting at offset.
Return new offset
"""
offset,tmp = of._unpack("!IIIIII", raw, offset)
self.packet_in_mask = tmp[0]
self.packet_in_mask_slave = tmp[1]
self.port_status_mask = tmp[2]
self.port_status_mask_slave = tmp[3]
self.flow_removed_mask = tmp[4]
self.flow_removed_mask_slave = tmp[5]
return offset
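# Illustrative usage sketch (not part of the original module): build an async
# config that delivers OFPR_ACTION packet-ins to the master role only. The
# `connection` object and the exact reason/port-status constants used below
# are assumptions for the example.
#
# cfg = nx_async_config()
# cfg.set_packet_in(1 << of.OFPR_ACTION, master=True, slave=False)
# cfg.set_port_status(1 << of.OFPPR_DELETE)   # both roles
# connection.send(cfg)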
#@vendor_s_message('NXT_PACKET_IN', 17)
class nxt_packet_in (nicira_base, of.ofp_packet_in):
subtype = NXT_PACKET_IN
_MIN_LENGTH = 34
def _init (self, kw):
ofp_header.__init__(self)
self._buffer_id = None
self.reason = 0
self.data = None
self._total_len = None
self._match = None
if 'total_len' in kw:
self._total_len = kw.pop('total_len')
def _validate (self):
if self.data and (self.total_len < len(self.packed_data)):
return "total len less than data len"
@property
def in_port (self):
return self.match.of_in_port
@property
def match (self):
if self._match is None:
self._match = nx_match()
return self._match
@match.setter
def match (self, v):
self._match = v
def pack (self):
assert self._assert()
match_len = len(self.match)
packed = b""
packed += ofp_header.pack(self)
packed += struct.pack("!LL", NX_VENDOR_ID, self.subtype)
packed += struct.pack("!LHBBQH", self._buffer_id, self.total_len,
self.reason, self.table_id, self.cookie,
match_len)
packed += _PAD6
    packed += self.match.pack()
packed += _PAD * ((match_len + 7)/8*8 - match_len)
packed += _PAD2
packed += self.packed_data
return packed
@property
def packed_data (self):
if self.data is None:
return b''
if hasattr(self.data, 'pack'):
# I don't think this is ever encountered...
return self.data.pack()
else:
return self.data
def unpack (self, raw, offset=0):
_offset = offset
offset,length = self._unpack_header(raw, offset)
offset,(vendor,subtype) = _unpack("!LL", raw, offset)
assert subtype == self.subtype
#print "vendor %08x subtype %i" % (vendor,subtype)
offset,(self._buffer_id, self._total_len, self.reason, self.table_id,
self.cookie, match_len) = _unpack("!LHBBQH", raw, offset)
offset = _skip(raw, offset, 6)
self.match = None
offset = self.match.unpack(raw, offset, match_len)
offset = _skip(raw, offset, (match_len + 7)//8*8 - match_len)
offset = _skip(raw, offset, 2)
offset,self.data = _read(raw, offset, length-(offset-_offset))
assert length == len(self)
return offset,length
def __len__ (self):
match_len = len(self.match)
l = 8 + 4 + 4
l += 4 + 2 + 1 + 1 + 8 + 2
l += 6
l += match_len
l += (match_len + 7)//8*8 - match_len
l += 2
l += len(self.packed_data)
return l
def __eq__ (self, other):
if not of.ofp_packet_in.__eq__(self, other): return False
if self.table_id != other.table_id: return False
if self.cookie != other.cookie: return False
if self.match != other.match: return False
return True
def __ne__ (self, other): return not self.__eq__(other)
def show (self, prefix=''):
outstr = ''
outstr += prefix + 'header: \n'
outstr += ofp_header.show(self, prefix + ' ')
outstr += prefix + 'buffer_id: ' + str(self.buffer_id) + '\n'
outstr += prefix + 'total_len: ' + str(self._total_len) + '\n'
outstr += prefix + 'reason: ' + str(self.reason) + '\n'
outstr += prefix + 'table_id: ' + str(self.table_id) + '\n'
outstr += prefix + 'match: ' + str(self.match) + '\n'
outstr += prefix + 'cookie: ' + str(self.cookie) + '\n'
#from pox.lib.util import hexdump
#outstr += prefix + 'data: ' + hexdump(self.data) + '\n'
outstr += prefix + 'datalen: ' + str(len(self.data)) + '\n'
return outstr
def field (self, t):
for i in self.match:
if type(i) == t:
return i
return None
class nx_match (object):
"""
A flexible match container
This has some magic. It acts as if it has properties for each
registered nxm_entry type. For example, there's a NXM_OF_IP_SRC
nxm_entry type for the source IP address, so you can do:
m = nx_match()
m.of_tcp_src = IPAddr("192.168.1.1")
Since nxm_entries can have masks, you actually get a number of pseudo-
properties, by appending "_mask", "_with_mask", or "_entry":
m.of_tcp_src_with_mask = ("192.168.1.0", "255.255.255.0")
# or...
m.of_tcp_src = "192.168.1.0"
m.of_tcp_src_mask = "255.255.255.0"
# or...
m.of_tcp_src_entry = NXM_OF_IP_SRC("192.168.1.1", "255.255.255.0")
nxm_entries themselves may have magic. For example, IP address
nxm_entries understand CIDR bits as part of the value, so you can do:
m.of_tcp_src = "192.168.1.0/24"
print m.of_tcp_src
> NXM_OF_IP_SRC(192.168.1.0/255.255.255.0)
*The order you add entries is significant*. If you have an entry
with a prerequisite, you must add the prerequisite first. It would be
really nice if nx_match could automatically adjust orderings to try to
satisfy nxm_entry prerequisties, and throw an exception if it's not
possible. This is a TODO item.
"""
#TODO: Test!
#TODO: Handle prerequisites (as described above)
def __init__ (self, *parts, **kw):
"""
Initialize this match
You can initialize either from a list of parts or from a bunch of
key/value pairs which are just like a shortcut for setting individual
properties.
"""
self._parts = list(parts)
self._dirty()
    for k,v in kw.items():
setattr(self, k, v)
def unpack (self, raw, offset, avail):
del self._parts[:]
self._dirty()
stop = avail+offset
while offset < stop:
_o = offset
offset,entry = nxm_entry.unpack_new(raw, offset)
if offset == _o:
raise RuntimeError("No progress unpacking nxm_entries")
self._parts.append(entry)
#assert offset == stop
return offset
def pack (self, omittable = False):
return ''.join(x.pack(omittable) for x in self._parts)
def __eq__ (self, other):
if not isinstance(other, self.__class__): return False
    return self._parts == other._parts
def clone (self):
n = nx_match()
for p in self._parts:
n.append(p.clone())
return n
def __str__ (self):
return ','.join(str(m) for m in self._parts)
def show (self, prefix = ''):
return prefix + str(self)
@property
def _map (self):
if self._cache is None:
self._cache = {}
for i in self._parts:
assert i._nxm_type not in self._cache
self._cache[i._nxm_type] = i
return self._cache
def __len__ (self):
return sum(len(x) for x in self._parts)
def __getitem__ (self, index):
return self._parts[index]
def remove (self, t):
"""
Remove an entry
"""
if isinstance(t, nxm_entry):
t = t._nxm_type
if t not in self._map:
return
t = self._map[t]
self._parts.remove(t)
self._dirty()
def find (self, t):
"""
Returns nxm_entry of given type
"""
if isinstance(t, nxm_entry) or _issubclass(t, nxm_entry):
t = t._nxm_type
return self._map.get(t)
def index (self, t):
"""
Returns index of nxm_entry of given type
"""
if isinstance(t, nxm_entry):
t = t._nxm_type
if t not in self._map:
return -1 # Exception? None?
    return self._parts.index(self._map[t])
def _dirty (self):
self._cache = None
def insert (self, position, item):
    if isinstance(position, nxm_entry) or _issubclass(position, nxm_entry):
position = self.find(position)
if position == None:
self.append(item)
return
self._parts.insert(position, item)
def insert_after (self, position, item):
    if isinstance(position, nxm_entry) or _issubclass(position, nxm_entry):
position = self.find(position)
if position == None:
self.append(item)
return
self._parts.insert(position+1, item)
def append (self, item):
"""
Add another nxm_entry to this match
"""
#TODO: check prereqs
if not isinstance(item, nxm_entry):
raise ValueError("Not an nxm_entry")
if self.find(item) is not None:
raise ValueError("Type already exists in this match")
self._parts.append(item)
self._dirty()
  def __iadd__ (self, other):
    self.append(other)
    return self
@staticmethod
def _fixname (name):
name = name.upper()
if not name.startswith("NXM_"):
name = "NXM_" + name
is_mask = with_mask = is_entry = False
if name.endswith("_MASK"):
if name.endswith("_WITH_MASK"):
with_mask = True
name = name.rsplit("_WITH_MASK", 1)[0]
else:
is_mask = True
name = name.rsplit("_MASK", 1)[0]
elif name.endswith("_ENTRY"):
name = name.rsplit("_ENTRY", 1)[0]
is_entry = True
nxt = _nxm_name_to_type.get(name)
#print name, nxt, is_mask, with_mask, is_entry
return name, nxt, is_mask, with_mask, is_entry
def __getattr__ (self, name):
name,nxt,is_mask,with_mask,is_entry = self._fixname(name)
if nxt is None:
raise AttributeError("No attribute " + name)
if nxt not in self._map:
if with_mask: return None,None
if is_mask: return None # Exception?
if is_entry: return None # Synthesize?
return None
v = self._map[nxt]
if with_mask: return (v.value,v.mask)
if is_mask: return v.mask
if is_entry: return v
return v.value
def __setattr__ (self, name, value):
if name.startswith('_'):
return object.__setattr__(self, name, value)
name,nxt,is_mask,with_mask,is_entry = self._fixname(name)
if nxt is None:
return object.__setattr__(self, name, value)
#raise AttributeError("No attribute " + name)
entry = self.find(nxt)
if is_entry: assert isinstance(value, nxm_entry)
if is_entry and (value is None) and (entry is not None):
# Shortcut entry removal
# Allow for non is_entry? Doing so is ambiguous if there are
# ever nxm_entries with None as a legal value.
self.remove(nxt)
return
if isinstance(value, nxm_entry):
      if nxt != value._nxm_type:
raise ValueError("Unmatched types")
if entry is None:
self.append(value)
else:
# hacky
entry.value = value.value
entry.mask = value.mask
else:
if entry is None:
entry = _nxm_type_to_class[nxt]()
self.append(entry)
# hacky
if with_mask:
entry.mask = value[1]
entry.value = value[0]
elif is_mask:
entry.mask = value
else:
entry.value = value
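# Illustrative prerequisite sketch (not part of the original module): to match
# an IPv4 source address, the EtherType entry must be appended before the IP
# entry, as noted in the nx_match docstring. The values are examples only.
#
# m = nx_match()
# m.of_eth_type = 0x0800            # prerequisite for IP fields
# m.of_ip_src = "192.168.1.0/24"    # becomes NXM_OF_IP_SRC with a /24 mask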
#from pox.lib.revent import Event
#class NXPacketIn (Event):
# def __init__ (self, connection, ofp):
# Event.__init__(self)
# self.connection = connection
# self.ofp = ofp
# self.port = ofp.in_port
# self.data = ofp.data
# self._parsed = None
# self.dpid = connection.dpid
#
# def parse (self):
# if self._parsed is None:
# self._parsed = ethernet(self.data)
# return self._parsed
#
# @property
# def parsed (self):
# """
# The packet as parsed by pox.lib.packet
# """
# return self.parse()
#
#core.openflow._eventMixin_events.add(NXPacketIn)
_old_unpacker = None
def _unpack_nx_vendor (raw, offset):
from pox.lib.util import hexdump
v = _unpack("!L", raw, offset + 8)[1][0]
if v != NX_VENDOR_ID:
return _old_unpacker(raw, offset)
subtype = _unpack("!L", raw, offset+8+4)[1][0]
if subtype == NXT_PACKET_IN:
npi = nxt_packet_in()
return npi.unpack(raw, offset)[0], npi
elif subtype == NXT_ROLE_REPLY:
nrr = nxt_role_reply()
return nrr.unpack(raw, offset)[0], nrr
else:
print "NO UNPACKER FOR",subtype
return _old_unpacker(raw, offset)
def _init_unpacker ():
global _old_unpacker
from pox.openflow.of_01 import unpackers
_old_unpacker = unpackers[of.OFPT_VENDOR]
unpackers[of.OFPT_VENDOR] = _unpack_nx_vendor
_old_handler = None
from pox.openflow import PacketIn
def _handle_VENDOR (con, msg):
if isinstance(msg, nxt_packet_in) and core.NX.convert_packet_in:
e = con.ofnexus.raiseEventNoErrors(PacketIn, con, msg)
if e is None or e.halt != True:
con.raiseEventNoErrors(PacketIn, con, msg)
# elif isinstance(msg, nxt_role_reply):
# pass
# #TODO
else:
_old_handler(con, msg)
def _init_handler ():
global _old_handler
from pox.openflow.of_01 import handlerMap, _set_handlers
_old_handler = handlerMap.get(of.OFPT_VENDOR)
handlerMap[of.OFPT_VENDOR] = _handle_VENDOR
_set_handlers()
class NX (object):
"""
Nicira extension component
"""
convert_packet_in = False
def launch (convert_packet_in = False):
_init_handler()
_init_unpacker()
nx = NX()
if convert_packet_in:
nx.convert_packet_in = True
core.register("NX", nx)
| apache-2.0 | 5,946,252,909,831,182,000 | 26.441417 | 79 | 0.592011 | false |
DOV-Vlaanderen/pydov | tests/test_types_itp_geotechnischecodering.py | 1 | 2223 | """Module grouping tests for the
pydov.types.interpretaties.GeotechnischeCodering class."""
from pydov.types.interpretaties import GeotechnischeCodering
from pydov.util.dovutil import build_dov_url
from tests.abstract import AbstractTestTypes
location_wfs_getfeature = \
'tests/data/types/interpretaties/geotechnische_codering/' \
'wfsgetfeature.xml'
location_wfs_feature = \
'tests/data/types/interpretaties/geotechnische_codering/feature.xml'
location_dov_xml = \
'tests/data/types/interpretaties/geotechnische_codering' \
'/geotechnische_codering.xml'
class TestGeotechnischeCodering(AbstractTestTypes):
"""Class grouping tests for the
pydov.types.interpretaties.GeotechnischeCodering class."""
datatype_class = GeotechnischeCodering
namespace = 'http://dov.vlaanderen.be/ocdov/interpretaties'
pkey_base = build_dov_url('data/interpretatie/')
field_names = [
'pkey_interpretatie', 'pkey_boring',
'betrouwbaarheid_interpretatie', 'x', 'y',
'start_interpretatie_mtaw',
'diepte_laag_van', 'diepte_laag_tot',
'hoofdnaam1_grondsoort', 'hoofdnaam2_grondsoort',
'bijmenging1_plaatselijk', 'bijmenging1_hoeveelheid',
'bijmenging1_grondsoort',
'bijmenging2_plaatselijk', 'bijmenging2_hoeveelheid',
'bijmenging2_grondsoort',
'bijmenging3_plaatselijk', 'bijmenging3_hoeveelheid',
'bijmenging3_grondsoort']
field_names_subtypes = [
'diepte_laag_van', 'diepte_laag_tot',
'hoofdnaam1_grondsoort', 'hoofdnaam2_grondsoort',
'bijmenging1_plaatselijk', 'bijmenging1_hoeveelheid',
'bijmenging1_grondsoort',
'bijmenging2_plaatselijk', 'bijmenging2_hoeveelheid',
'bijmenging2_grondsoort',
'bijmenging3_plaatselijk', 'bijmenging3_hoeveelheid',
'bijmenging3_grondsoort']
field_names_nosubtypes = [
'pkey_interpretatie', 'pkey_boring',
'betrouwbaarheid_interpretatie', 'x', 'y',
'start_interpretatie_mtaw']
valid_returnfields = ('pkey_interpretatie', 'pkey_boring')
valid_returnfields_subtype = (
'pkey_interpretatie', 'diepte_laag_van', 'diepte_laag_tot')
inexistent_field = 'onbestaand'
| mit | -2,921,369,570,890,383,400 | 39.418182 | 72 | 0.701305 | false |
dls-controls/pymalcolm | malcolm/modules/builtin/parameters.py | 1 | 1706 | from typing import Union
from annotypes import Anno, add_call_types
default_desc = "Default value for parameter. If not specified, parameter is required"
with Anno("Specify that this class will take a parameter name"):
AName = str
with Anno("Description of this parameter"):
ADescription = str
with Anno(default_desc):
AStringDefault = str
with Anno(default_desc):
AFloat64Default = float
with Anno(default_desc):
AInt32Default = int
with Anno("The Anno representing the parameter"):
AAnno = Union[Anno]
def common_args(name, default):
for s in name.split("_"):
# Only support UPP3R or l0wer case for each _ section
assert s.islower() or s.isupper(), "Parameter %s should be snake_case" % (name,)
ret = dict(name=name)
if default is not None:
ret["default"] = default
return ret
@add_call_types
def string(
name: AName, description: ADescription, default: AStringDefault = None
) -> AAnno:
"""Add a string parameter to be passed when instantiating this YAML file"""
args = common_args(name, default)
return Anno(description, **args).set_typ(str)
@add_call_types
def float64(
name: AName, description: ADescription, default: AFloat64Default = None
) -> AAnno:
"""Add a float64 parameter to be passed when instantiating this YAML file"""
args = common_args(name, default)
return Anno(description, **args).set_typ(float)
@add_call_types
def int32(
name: AName, description: ADescription, default: AInt32Default = None
) -> AAnno:
"""Add an int32 parameter to be passed when instantiating this YAML file"""
args = common_args(name, default)
return Anno(description, **args).set_typ(int)
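# Illustrative example (not part of the module): these helpers are normally
# referenced from YAML when instantiating a block; calling one directly returns
# an Anno describing the parameter. The name and default below are made up.
#
# hostname = string("hostname", "Host to connect to", default="localhost")
# assert hostname.typ is str        # the described parameter takes a str value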
| apache-2.0 | -1,394,620,227,895,945,700 | 30.018182 | 88 | 0.698124 | false |
mmlacak/crochess | book/py/scene_mayan_ascendancy.py | 1 | 25347 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2018 - 2020 Mario Mlačak, [email protected]
# Licensed under 3-clause (modified) BSD license. See LICENSE for details.
from util import in_range
import gen_steps as GS
from piece import PieceType
from board import BoardType, Board
from mark import MarkType
from corner import Corner
from scene import Scene
class SceneMayanAscendancyMixin:
def scn_ma_01_pyramid_activation_init(self, bt=BoardType.MayanAscendancy):
# move_pyramid_activation_init
scene = Scene('scn_ma_01_pyramid_activation_init', bt)
start = (11, 3)
scene.board.set_piece(3, 7, piece=PieceType.Pyramid)
scene.board.set_piece(6, 7, piece=PieceType.Bishop)
scene.board.set_piece(*start, piece=PieceType.Pegasus)
scene.board.set_piece(3, 9, piece=-PieceType.Knight)
scene.board.set_piece(3, 3, piece=-PieceType.Bishop)
# direction <-2, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-2, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-2, 1), ]) )
scene.append_text("1", *coords(), corner=Corner.UpperRight)
scene.append_text("2", *coords(), corner=Corner.UpperRight)
scene.append_text("3", *coords(), corner=Corner.UpperRight)
scene.append_text("4", *coords(), corner=Corner.UpperRight, mark_type=MarkType.Action)
return scene
def scn_ma_02_pyramid_activated(self, bt=BoardType.MayanAscendancy):
# move_pyramid_activated
scene = Scene('scn_ma_02_pyramid_activated', bt)
start = (3, 7)
scene.board.set_piece(*start, piece=PieceType.Pegasus)
scene.board.set_piece(6, 7, piece=PieceType.Bishop)
scene.board.set_piece(3, 9, piece=-PieceType.Knight)
scene.board.set_piece(3, 3, piece=-PieceType.Bishop)
# direction <1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords(), mark_type=MarkType.Blocked)
scene.append_text("4", *coords(), mark_type=MarkType.Blocked)
# direction <0, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords(), mark_type=MarkType.Action)
scene.append_text("3", *coords(), mark_type=MarkType.Blocked)
scene.append_text("4", *coords(), mark_type=MarkType.Blocked)
# direction <-1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
# direction <0, -1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
scene.append_text("4", *coords(), mark_type=MarkType.Action)
return scene
def scn_ma_03_pyramid_activation_end(self, bt=BoardType.MayanAscendancy):
# move_pyramid_activation_end
scene = Scene('scn_ma_03_pyramid_activation_end', bt)
scene.board.set_piece(3, 7, PieceType.Pegasus)
scene.board.set_piece(6, 7, PieceType.Bishop)
scene.board.set_piece(3, 9, PieceType.Pyramid)
scene.board.set_piece(3, 3, -PieceType.Bishop)
return scene
#
# Pawn activating Pyramid
def scn_ma_04_pyramid_activation_by_pawn(self, bt=BoardType.MayanAscendancy):
# move_pyramid_activation_by_pawn
scene = Scene('scn_ma_04_pyramid_activation_by_pawn', bt)
scene.board.set_piece(4, 2, piece=PieceType.Pawn)
scene.board.set_piece(3, 3, piece=PieceType.Pyramid)
scene.board.set_piece(5, 6, piece=PieceType.Pawn)
scene.board.set_piece(5, 7, piece=PieceType.Pyramid)
start = (8, 1)
scene.board.set_piece(*start, piece=PieceType.Pawn)
scene.board.set_piece(8, 4, piece=PieceType.Pyramid)
# capture-fields
scene.append_arrow(4, 2, 3, 3, mark_type=MarkType.Action)
scene.append_arrow(4, 2, 5, 3, mark_type=MarkType.Blocked)
scene.append_text("1", 4, 2, corner=Corner.UpperRightFieldMarker, mark_type=MarkType.Blocked)
# step-fields 1
scene.append_arrow(5, 6, 5, 7, mark_type=MarkType.Blocked)
scene.append_text("2", 5, 6, corner=Corner.UpperRight, mark_type=MarkType.Blocked)
# step-fields 2
# direction <0, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_text("3", *start, corner=Corner.UpperRight, mark_type=MarkType.Blocked)
return scene
#
# Promotion
def scn_ma_05_promo_init(self, bt=BoardType.MayanAscendancy):
# move_pyramid_promo_init
scene = Scene('scn_ma_05_promo_init', bt)
start = (11, 3)
scene.board.set_piece(3, 7, piece=PieceType.Pyramid)
scene.board.set_piece(7, 7, piece=PieceType.Pawn)
scene.board.set_piece(3, 5, piece=PieceType.Pawn)
scene.board.set_piece(*start, piece=PieceType.Pegasus)
scene.board.set_piece(5, 0, piece=PieceType.Queen)
scene.append_text("1", 7, 7, corner=Corner.LowerRight, mark_type=MarkType.Blocked)
scene.append_text("2", 3, 5, corner=Corner.LowerRight, mark_type=MarkType.Blocked)
# direction <-2, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-2, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-2, 1), ]) )
scene.append_text("1", *coords(), corner=Corner.UpperRight)
scene.append_text("2", *coords(), corner=Corner.UpperRight)
scene.append_text("3", *coords(), corner=Corner.UpperRight)
scene.append_text("4", *coords(), corner=Corner.UpperRight, mark_type=MarkType.Action)
return scene
def scn_ma_06_promo_pyramid_activated(self, bt=BoardType.MayanAscendancy):
# move_pyramid_promo_activate
scene = Scene('scn_ma_06_promo_pyramid_activated', bt)
start = (3, 7)
scene.board.set_piece(*start, piece=PieceType.Pegasus)
scene.board.set_piece(7, 7, piece=PieceType.Pawn)
scene.board.set_piece(3, 5, piece=PieceType.Pawn)
scene.board.set_piece(5, 0, piece=PieceType.Queen)
scene.append_text("1", 7, 7, corner=Corner.LowerRight, mark_type=MarkType.Blocked)
scene.append_text("2", 3, 5, corner=Corner.LowerRight, mark_type=MarkType.Blocked)
# direction <1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
scene.append_text("4", *coords(), mark_type=MarkType.Action)
# direction <0, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
scene.append_text("4", *coords())
# direction <-1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
# direction <0, -1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ]) )
scene.append_text("1", *coords() )
scene.append_text("2", *coords(), mark_type=MarkType.Blocked)
scene.append_text("3", *coords(), mark_type=MarkType.Blocked)
scene.append_text("4", *coords(), mark_type=MarkType.Blocked)
return scene
def scn_ma_07_promo_end(self, bt=BoardType.MayanAscendancy):
# move_pyramid_promo_end
scene = Scene('scn_ma_07_promo_end', bt)
scene.board.set_piece(3, 7, piece=PieceType.Pegasus)
scene.board.set_piece(7, 7, piece=PieceType.Queen)
scene.board.set_piece(3, 5, piece=PieceType.Pawn)
scene.board.set_piece(5, 0, piece=PieceType.Queen)
scene.append_text("2", 3, 5, corner=Corner.LowerRight, mark_type=MarkType.Blocked)
return scene
#
# Conversion
def scn_ma_08_conversion_init(self, bt=BoardType.MayanAscendancy):
# move_pyramid_conversion_init
scene = Scene('scn_ma_08_conversion_init', bt)
start = (7, 8)
scene.board.set_piece(3, 4, piece=PieceType.Pyramid)
scene.board.set_piece(7, 4, piece=-PieceType.Rook)
scene.board.set_piece(3, 7, piece=-PieceType.Bishop)
scene.board.set_piece(*start, piece=PieceType.Bishop)
scene.board.set_piece(0, 0, piece=PieceType.Rook)
scene.board.set_piece(11, 0, piece=PieceType.Rook)
# direction <-1, -1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, -1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, -1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
scene.append_text("4", *coords(), mark_type=MarkType.Action )
return scene
def scn_ma_09_conversion_pyramid_activated(self, bt=BoardType.MayanAscendancy):
# move_pyramid_conversion_activated
scene = Scene('scn_ma_09_conversion_pyramid_activated', bt)
start = (3, 4)
scene.board.set_piece(*start, piece=PieceType.Bishop)
scene.board.set_piece(7, 4, piece=-PieceType.Rook)
scene.board.set_piece(3, 7, piece=-PieceType.Bishop)
scene.board.set_piece(0, 0, piece=PieceType.Rook)
scene.board.set_piece(11, 0, piece=PieceType.Rook)
# direction <1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
scene.append_text("4", *coords(), mark_type=MarkType.Action )
# direction <0, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords(), mark_type=MarkType.Blocked )
scene.append_text("4", *coords(), mark_type=MarkType.Blocked )
# direction <-1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
# direction <0, -1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
scene.append_text("4", *coords())
return scene
def scn_ma_10_conversion_end(self, bt=BoardType.MayanAscendancy):
# move_pyramid_conversion_end
scene = Scene('scn_ma_10_conversion_end', bt)
scene.board.set_piece(3, 4, piece=PieceType.Bishop)
scene.board.set_piece(7, 4, piece=PieceType.Rook)
scene.board.set_piece(3, 7, piece=-PieceType.Bishop)
scene.board.set_piece(0, 0, piece=PieceType.Rook)
scene.board.set_piece(11, 0, piece=PieceType.Rook)
return scene
#
# Pyramid cascading
def scn_ma_11_cascading_init(self, bt=BoardType.MayanAscendancy):
# move_pyramid_cascading_init
scene = Scene('scn_ma_11_cascading_init', bt)
start = (10, 1)
scene.board.set_piece(*start, piece=PieceType.Queen)
scene.board.set_piece(2, 6, piece=PieceType.Bishop)
pyramid_1 = (5, 6)
scene.board.set_piece(*pyramid_1, piece=PieceType.Pyramid)
pyramid_2 = (8, 6)
scene.board.set_piece(*pyramid_2, piece=PieceType.Pyramid)
pyramid_3 = (5, 1)
scene.board.set_piece(*pyramid_3, piece=PieceType.Pyramid)
offset = (0.45, 0.15)
# direction <-1, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 1), ]) )
scene.append_text("1", *coords(), corner=Corner.UpperRight)
scene.append_text("2", *coords(), corner=Corner.UpperRight)
scene.append_text("3", *coords(), corner=Corner.UpperRight)
scene.append_text("4", *coords(), corner=Corner.UpperRight)
scene.append_text("5", *coords(), corner=Corner.UpperRight, mark_type=MarkType.Action )
# pyramids
scene.append_text("1", *GS.add(pyramid_1, offset), corner=Corner.Position, mark_type=MarkType.Blocked )
scene.append_text("2", *GS.add(pyramid_2, offset), corner=Corner.Position, mark_type=MarkType.Blocked )
scene.append_text("3", *GS.add(pyramid_3, offset), corner=Corner.Position, mark_type=MarkType.Blocked )
return scene
def scn_ma_12_cascading_pyramid_1_activated(self, bt=BoardType.MayanAscendancy):
# move_pyramid_cascading_activated_1
scene = Scene('scn_ma_12_cascading_pyramid_1_activated', bt)
start = (5, 6)
scene.board.set_piece(*start, piece=PieceType.Queen)
scene.board.set_piece(2, 6, piece=PieceType.Bishop)
pyramid_2 = (8, 6)
scene.board.set_piece(*pyramid_2, piece=PieceType.Pyramid)
pyramid_3 = (5, 1)
scene.board.set_piece(*pyramid_3, piece=PieceType.Pyramid)
offset = (0.45, 0.15)
# direction <-1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords(), mark_type=MarkType.Blocked)
scene.append_text("4", *coords(), mark_type=MarkType.Blocked)
scene.append_text("5", *coords(), mark_type=MarkType.Blocked)
# direction <0, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
scene.append_text("4", *coords())
scene.append_text("5", *coords())
# direction <0, -1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords())
scene.append_text("4", *coords())
scene.append_text("5", *coords(), mark_type=MarkType.Blocked )
# direction <1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
scene.append_arrow( *coords(), mark_type=MarkType.Action )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
scene.append_arrow( *coords(), mark_type=MarkType.Blocked )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
scene.append_text("3", *coords(), mark_type=MarkType.Action )
scene.append_text("4", *coords(), mark_type=MarkType.Blocked )
scene.append_text("5", *coords(), mark_type=MarkType.Blocked )
# pyramids
scene.append_text("2", *GS.add(pyramid_2, offset), corner=Corner.Position, mark_type=MarkType.Blocked )
scene.append_text("3", *GS.add(pyramid_3, offset), corner=Corner.Position, mark_type=MarkType.Blocked )
return scene
def scn_ma_13_cascading_pyramid_2_activated(self, bt=BoardType.MayanAscendancy):
# move_pyramid_cascading_activated_2
scene = Scene('scn_ma_13_cascading_pyramid_2_activated', bt)
scene.board.set_piece(5, 6, piece=PieceType.Queen)
scene.board.set_piece(2, 6, piece=PieceType.Bishop)
start = (8, 6)
scene.board.set_piece(*start, piece=PieceType.Pyramid)
pyramid_3 = (5, 1)
scene.board.set_piece(*pyramid_3, piece=PieceType.Pyramid)
offset = (0.45, 0.15)
# direction <-1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(-1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
# direction <1, 0>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(1, 0), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
# direction <0, -1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, -1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
# direction <0, 1>
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ], include_prev=True) )
scene.append_arrow( *coords() )
scene.append_arrow( *coords() )
coords = GS.gen_next( GS.gen_steps(start=start, rels=[(0, 1), ]) )
scene.append_text("1", *coords())
scene.append_text("2", *coords())
# pyramids
scene.append_text("1", *GS.add(start, offset), corner=Corner.Position, mark_type=MarkType.Blocked)
scene.append_text("3", *GS.add(pyramid_3, offset), corner=Corner.Position, mark_type=MarkType.Blocked)
return scene
def scn_ma_14_cascading_end(self, bt=BoardType.MayanAscendancy):
# move_pyramid_cascading_end
scene = Scene('scn_ma_14_cascading_end', bt)
scene.board.set_piece(5, 6, piece=PieceType.Queen)
scene.board.set_piece(2, 6, piece=PieceType.Bishop)
pyramid_1 = (8, 6)
scene.board.set_piece(*pyramid_1, piece=PieceType.Pyramid)
pyramid_2 = (8, 8)
scene.board.set_piece(*pyramid_2, piece=PieceType.Pyramid)
pyramid_3 = (5, 1)
scene.board.set_piece(*pyramid_3, piece=PieceType.Pyramid)
offset = (0.45, 0.15)
scene.append_text("1", *GS.add(pyramid_1, offset), corner=Corner.Position, mark_type=MarkType.Blocked)
scene.append_text("2", *GS.add(pyramid_2, offset), corner=Corner.Position, mark_type=MarkType.Blocked)
scene.append_text("3", *GS.add(pyramid_3, offset), corner=Corner.Position, mark_type=MarkType.Blocked)
return scene
#
# Pyramid against royal powers (King free from actions, effects of passive pieces)
def scn_ma_15_pyramid_vs_king(self, bt=BoardType.MayanAscendancy):
# move_pyramid_vs_king
scene = Scene('scn_ma_15_pyramid_vs_king', bt, width=12, height=3)
scene.board.set_piece(4, 0, -PieceType.King)
scene.board.set_piece(3, 0, PieceType.Pyramid)
scene.board.set_piece(2, 1, PieceType.Queen)
scene.append_arrow(2, 1, 3, 0)
scene.append_arrow(3, 0, 4, 0, mark_type=MarkType.Illegal )
return scene
def scn_ma_16_pyramid_vs_bishop(self, bt=BoardType.MayanAscendancy):
# move_pyramid_vs_bishop
scene = Scene('scn_ma_16_pyramid_vs_bishop', bt, width=12, height=3)
scene.board.set_piece(4, 0, -PieceType.Bishop)
scene.board.set_piece(3, 0, PieceType.Pyramid)
scene.board.set_piece(2, 1, PieceType.Queen)
scene.append_arrow(2, 1, 3, 0)
scene.append_arrow(3, 0, 4, 0, mark_type=MarkType.Action)
return scene
| bsd-3-clause | 3,875,221,039,199,557,600 | 39.61859 | 111 | 0.605855 | false |
cloudant/mango | test/05-index-selection-test.py | 1 | 3377 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import mango
import user_docs
class IndexSelectionTests(mango.UserDocsTests):
@classmethod
def setUpClass(klass):
super(IndexSelectionTests, klass).setUpClass()
user_docs.add_text_indexes(klass.db, {})
def test_basic(self):
resp = self.db.find({"name.last": "A last name"}, explain=True)
assert resp["index"]["type"] == "json"
def test_with_and(self):
resp = self.db.find({
"name.first": "Stephanie",
"name.last": "This doesn't have to match anything."
}, explain=True)
assert resp["index"]["type"] == "json"
def test_with_text(self):
resp = self.db.find({
"$text" : "Stephanie",
"name.first": "Stephanie",
"name.last": "This doesn't have to match anything."
}, explain=True)
assert resp["index"]["type"] == "text"
def test_no_view_index(self):
resp = self.db.find({"name.first": "Ohai!"}, explain=True)
assert resp["index"]["type"] == "text"
def test_with_or(self):
resp = self.db.find({
"$or": [
{"name.first": "Stephanie"},
{"name.last": "This doesn't have to match anything."}
]
}, explain=True)
assert resp["index"]["type"] == "text"
def test_use_most_columns(self):
# ddoc id for the age index
ddocid = "_design/ad3d537c03cd7c6a43cf8dff66ef70ea54c2b40f"
resp = self.db.find({
"name.first": "Stephanie",
"name.last": "Something or other",
"age": {"$gt": 1}
}, explain=True)
assert resp["index"]["ddoc"] != "_design/" + ddocid
resp = self.db.find({
"name.first": "Stephanie",
"name.last": "Something or other",
"age": {"$gt": 1}
}, use_index=ddocid, explain=True)
assert resp["index"]["ddoc"] == ddocid
class MultiTextIndexSelectionTests(mango.UserDocsTests):
@classmethod
def setUpClass(klass):
super(MultiTextIndexSelectionTests, klass).setUpClass()
klass.db.create_text_index(ddoc="foo", analyzer="keyword")
klass.db.create_text_index(ddoc="bar", analyzer="email")
def test_view_ok_with_multi_text(self):
resp = self.db.find({"name.last": "A last name"}, explain=True)
assert resp["index"]["type"] == "json"
def test_multi_text_index_is_error(self):
try:
self.db.find({"$text": "a query"}, explain=True)
except Exception, e:
assert e.response.status_code == 400
def test_use_index_works(self):
resp = self.db.find({"$text": "a query"}, use_index="foo", explain=True)
assert resp["index"]["ddoc"] == "_design/foo"
| apache-2.0 | -7,581,040,958,048,683,000 | 35.706522 | 80 | 0.578028 | false |
bayesimpact/bob-emploi | frontend/server/asynchronous/i18n/test/download_translations_test.py | 1 | 3581 | """Unit tests for the download_translations module."""
import json
import os
import tempfile
import unittest
from unittest import mock
from airtable import airtable
import airtablemock
from bob_emploi.frontend.server.asynchronous.i18n import download_translations
class DownloadTranslationsTests(airtablemock.TestCase):
"""Unit tests for downloading translations."""
def setUp(self) -> None:
super().setUp()
temp_fd, self._tmp_json_file = tempfile.mkstemp(suffix='.json')
os.close(temp_fd)
airtablemock.create_empty_table('appkEc8N0Bw4Uok43', 'translations')
def tearDown(self) -> None:
os.unlink(self._tmp_json_file)
def test_main(self) -> None:
"""Test basic download_translations call."""
base = airtable.Airtable('appkEc8N0Bw4Uok43', '')
base.create('translations', {
'en': 'A string to translate in English',
'fr': 'A string to translate in French',
'field': 'A custom field, not really a translation',
'string': 'A string to translate',
})
base.create('translations', {
'en': 'A string to translate feminine',
'string': 'A string to translate_FEMININE',
})
base.create('translations', {
'fr': 'Another string to translate',
'string': 'Another string not in the server extraction',
})
# Translation for a Mailjet template subject.
base.create('translations', {
'en': 'Update your Bob password',
'string': 'Modifiez votre mot de passe Bob Emploi',
})
pot_file = os.path.join(os.path.dirname(__file__), 'testdata/strings.pot')
download_translations.main([
'--api-key', 'my-own-api-key',
'--strings', pot_file,
'--output', self._tmp_json_file,
])
with open(self._tmp_json_file, 'r') as output_file:
output_json = json.load(output_file)
self.assertEqual({
'A string to translate': {
'en': 'A string to translate in English',
'fr': 'A string to translate in French',
},
'A string to translate_FEMININE': {
'en': 'A string to translate feminine',
},
'Modifiez votre mot de passe Bob Emploi': {
'en': 'Update your Bob password',
}
}, output_json)
@mock.patch.dict(os.environ, {'FAIL_ON_MISSING_TRANSLATIONS': '1'})
def test_missing_keys(self) -> None:
"""Test download_translations when translations are missing."""
base = airtable.Airtable('appkEc8N0Bw4Uok43', '')
base.create('translations', {
'en': 'A string to translate in English',
'fr': 'A string to translate in French',
'field': 'A custom field, not really a translation',
'string': 'A string to translate',
})
pot_file = os.path.join(os.path.dirname(__file__), 'testdata/many-strings.pot')
with self.assertRaises(KeyError) as error:
download_translations.main([
'--api-key', 'my-own-api-key',
'--strings', pot_file,
'--output', self._tmp_json_file,
])
self.assertIn('A string missing translation', str(error.exception))
self.assertIn('Another string missing translation', str(error.exception))
self.assertNotIn('A string to translate', str(error.exception))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,795,036,928,520,266,000 | 33.432692 | 87 | 0.578051 | false |
commial/miasm | miasm/ir/translators/z3_ir.py | 1 | 10224 | from builtins import map
from builtins import range
import imp
import logging
# Raise an ImportError if z3 is not available WITHOUT actually importing it
imp.find_module("z3")
from miasm.ir.translators.translator import Translator
log = logging.getLogger("translator_z3")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("[%(levelname)-8s]: %(message)s"))
log.addHandler(console_handler)
log.setLevel(logging.WARNING)
class Z3Mem(object):
"""Memory abstration for TranslatorZ3. Memory elements are only accessed,
never written. To give a concrete value for a given memory cell in a solver,
add "mem32.get(address, size) == <value>" constraints to your equation.
The endianness of memory accesses is handled accordingly to the "endianness"
attribute.
Note: Will have one memory space for each addressing size used.
For example, if memory is accessed via 32 bits values and 16 bits values,
these access will not occur in the same address space.
"""
def __init__(self, endianness="<", name="mem"):
"""Initializes a Z3Mem object with a given @name and @endianness.
@endianness: Endianness of memory representation. '<' for little endian,
'>' for big endian.
@name: name of memory Arrays generated. They will be named
name+str(address size) (for example mem32, mem16...).
"""
# Import z3 only on demand
global z3
import z3
if endianness not in ['<', '>']:
raise ValueError("Endianness should be '>' (big) or '<' (little)")
self.endianness = endianness
self.mems = {} # Address size -> memory z3.Array
self.name = name
def get_mem_array(self, size):
"""Returns a z3 Array used internally to represent memory for addresses
of size @size.
@size: integer, size in bit of addresses in the memory to get.
Return a z3 Array: BitVecSort(size) -> BitVecSort(8).
"""
try:
mem = self.mems[size]
except KeyError:
# Lazy instantiation
self.mems[size] = z3.Array(self.name + str(size),
z3.BitVecSort(size),
z3.BitVecSort(8))
mem = self.mems[size]
return mem
def __getitem__(self, addr):
"""One byte memory access. Different address sizes with the same value
will result in different memory accesses.
@addr: a z3 BitVec, the address to read.
Return a z3 BitVec of size 8 bits representing a memory access.
"""
size = addr.size()
mem = self.get_mem_array(size)
return mem[addr]
def get(self, addr, size):
""" Memory access at address @addr of size @size.
@addr: a z3 BitVec, the address to read.
@size: int, size of the read in bits.
Return a z3 BitVec of size @size representing a memory access.
"""
original_size = size
if original_size % 8 != 0:
# Size not aligned on 8bits -> read more than size and extract after
size = ((original_size // 8) + 1) * 8
res = self[addr]
if self.is_little_endian():
for i in range(1, size // 8):
res = z3.Concat(self[addr+i], res)
else:
            for i in range(1, size // 8):
res = z3.Concat(res, self[addr+i])
if size == original_size:
return res
else:
# Size not aligned, extract right sized result
return z3.Extract(original_size-1, 0, res)
def is_little_endian(self):
"""True if this memory is little endian."""
return self.endianness == "<"
def is_big_endian(self):
"""True if this memory is big endian."""
return not self.is_little_endian()
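# Illustrative usage sketch (not part of the original module); it only applies
# if the z3 package is installed. The address and value below are arbitrary.
#
# import z3
# mem = Z3Mem(endianness="<")
# addr = z3.BitVecVal(0x1000, 32)                 # 32-bit addresses -> "mem32"
# solver = z3.Solver()
# solver.add(mem.get(addr, 32) == 0xdeadbeef)     # constrain 4 bytes at 0x1000
# assert solver.check() == z3.sat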
class TranslatorZ3(Translator):
"""Translate a Miasm expression to an equivalent z3 python binding
expression. Memory is abstracted via z3.Array (see Z3Mem).
The result of from_expr will be a z3 Expr.
If you want to interact with the memory abstraction after the translation,
you can instantiate your own Z3Mem, that will be equivalent to the one
used by TranslatorZ3.
"""
# Implemented language
__LANG__ = "z3"
# Operations translation
trivial_ops = ["+", "-", "/", "%", "&", "^", "|", "*", "<<"]
def __init__(self, endianness="<", loc_db=None, **kwargs):
"""Instance a Z3 translator
@endianness: (optional) memory endianness
"""
# Import z3 only on demand
global z3
import z3
super(TranslatorZ3, self).__init__(**kwargs)
self._mem = Z3Mem(endianness)
self.loc_db = loc_db
def from_ExprInt(self, expr):
return z3.BitVecVal(int(expr), expr.size)
def from_ExprId(self, expr):
return z3.BitVec(str(expr), expr.size)
def from_ExprLoc(self, expr):
if self.loc_db is None:
# No loc_db, fallback to default name
return z3.BitVec(str(expr), expr.size)
loc_key = expr.loc_key
offset = self.loc_db.get_location_offset(loc_key)
if offset is not None:
return z3.BitVecVal(offset, expr.size)
# fallback to default name
return z3.BitVec(str(loc_key), expr.size)
def from_ExprMem(self, expr):
addr = self.from_expr(expr.ptr)
return self._mem.get(addr, expr.size)
def from_ExprSlice(self, expr):
res = self.from_expr(expr.arg)
res = z3.Extract(expr.stop-1, expr.start, res)
return res
def from_ExprCompose(self, expr):
res = None
for arg in expr.args:
e = z3.Extract(arg.size-1, 0, self.from_expr(arg))
if res != None:
res = z3.Concat(e, res)
else:
res = e
return res
def from_ExprCond(self, expr):
cond = self.from_expr(expr.cond)
src1 = self.from_expr(expr.src1)
src2 = self.from_expr(expr.src2)
return z3.If(cond != 0, src1, src2)
def _abs(self, z3_value):
        return z3.If(z3_value >= 0, z3_value, -z3_value)
def _sdivC(self, num, den):
"""Divide (signed) @num by @den (z3 values) as C would
See modint.__div__ for implementation choice
"""
result_sign = z3.If(num * den >= 0,
z3.BitVecVal(1, num.size()),
z3.BitVecVal(-1, num.size()),
)
return z3.UDiv(self._abs(num), self._abs(den)) * result_sign
def from_ExprOp(self, expr):
args = list(map(self.from_expr, expr.args))
res = args[0]
if len(args) > 1:
for arg in args[1:]:
if expr.op in self.trivial_ops:
res = eval("res %s arg" % expr.op)
elif expr.op == ">>":
res = z3.LShR(res, arg)
elif expr.op == "a>>":
res = res >> arg
elif expr.op == "<<<":
res = z3.RotateLeft(res, arg)
elif expr.op == ">>>":
res = z3.RotateRight(res, arg)
elif expr.op == "sdiv":
res = self._sdivC(res, arg)
elif expr.op == "udiv":
res = z3.UDiv(res, arg)
elif expr.op == "smod":
res = res - (arg * (self._sdivC(res, arg)))
elif expr.op == "umod":
res = z3.URem(res, arg)
elif expr.op == "==":
res = z3.If(
args[0] == args[1],
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
elif expr.op == "<u":
res = z3.If(
z3.ULT(args[0], args[1]),
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
elif expr.op == "<s":
res = z3.If(
args[0] < args[1],
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
elif expr.op == "<=u":
res = z3.If(
z3.ULE(args[0], args[1]),
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
elif expr.op == "<=s":
res = z3.If(
args[0] <= args[1],
z3.BitVecVal(1, 1),
z3.BitVecVal(0, 1)
)
else:
raise NotImplementedError("Unsupported OP yet: %s" % expr.op)
elif expr.op == 'parity':
arg = z3.Extract(7, 0, res)
res = z3.BitVecVal(1, 1)
for i in range(8):
res = res ^ z3.Extract(i, i, arg)
elif expr.op == '-':
res = -res
elif expr.op == "cnttrailzeros":
size = expr.size
src = res
res = z3.If(src == 0, size, src)
for i in range(size - 1, -1, -1):
res = z3.If((src & (1 << i)) != 0, i, res)
elif expr.op == "cntleadzeros":
size = expr.size
src = res
res = z3.If(src == 0, size, src)
for i in range(size, 0, -1):
index = - i % size
out = size - (index + 1)
res = z3.If((src & (1 << index)) != 0, out, res)
elif expr.op.startswith("zeroExt"):
arg, = expr.args
res = z3.ZeroExt(expr.size - arg.size, self.from_expr(arg))
elif expr.op.startswith("signExt"):
arg, = expr.args
res = z3.SignExt(expr.size - arg.size, self.from_expr(arg))
else:
raise NotImplementedError("Unsupported OP yet: %s" % expr.op)
return res
def from_ExprAssign(self, expr):
src = self.from_expr(expr.src)
dst = self.from_expr(expr.dst)
return (src == dst)
# Register the class
Translator.register(TranslatorZ3)
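# Illustrative end-to-end sketch (not part of the original module): translating
# a miasm expression and solving it with z3. The expression constructors are
# assumed to come from miasm.expression.expression.
#
# from miasm.expression.expression import ExprId, ExprInt
# import z3
# translator = Translator.to_language("z3")
# expr = ExprId("EAX", 32) + ExprInt(4, 32)
# solver = z3.Solver()
# solver.add(translator.from_expr(expr) == 8)
# assert solver.check() == z3.sat                 # sat with EAX == 4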
| gpl-2.0 | -3,190,478,345,284,547,600 | 35.384342 | 81 | 0.51252 | false |
NRGI/nrgi-visualizations | blog-volitility-dm/data/transform.py | 1 | 3179 | #!/usr/bin/python
""" Converts eiti csv to json """
import json
import csv
# import sys
# import pycountry
def main():
""" main function """
with open('./source_data.csv', 'rU') as infile:
csv_reader = csv.reader(infile, delimiter=',', quotechar='"')
header = next(csv_reader)
# lkey = {
# str(header[i]).replace("text:u", "").replace("'", ""): i for i in range(0, len(header))
# }
ikey = {
i: str(header[i]).replace("text:u", "").replace("'", "") for i in range(0, len(header))
}
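        # ikey maps column index -> cleaned header label (the "text:u" prefix and
        # stray quotes are stripped); ikey[col] later supplies the "year" value
        # written for each data column.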
num_cols = len(next(csv_reader))
abs_index_list = {}
vol_index_list = {}
current_country = 'none'
abs_json_data = []
vol_json_data = []
for row in csv_reader:
if row[0] == current_country:
pass
else:
current_country = row[0]
current_country_id = row[1]
current_var = row[2]
for col in range(3, num_cols):
if row[col] == '':
value = None
else:
try:
value = float(row[col])
                    except ValueError:
value = None
if current_var == 'GGExp' or current_var == 'GGRev':
if current_country_id+ikey[col] not in abs_index_list:
abs_json_data.append({
"name": current_country,
'id': current_country_id,
"year": ikey[col],
current_var: value
})
abs_index_list[current_country_id+ikey[col]] = len(abs_json_data)-1
elif current_country_id+ikey[col] in abs_index_list:
abs_json_data[abs_index_list[current_country_id+ikey[col]]][current_var] = value
                # 'GGRevCh' is assumed here as the revenue-change counterpart of
                # 'GGExpCh', mirroring the GGExp/GGRev branch above.
                elif current_var == 'GGExpCh' or current_var == 'GGRevCh':
if current_country_id+ikey[col] not in vol_index_list:
vol_json_data.append({
"name": current_country,
'id': current_country_id,
"year": ikey[col],
current_var: value
})
vol_index_list[current_country_id+ikey[col]] = len(vol_json_data)-1
elif current_country_id+ikey[col] in vol_index_list:
vol_json_data[vol_index_list[current_country_id+ikey[col]]][current_var] = value
    # write out the absolute-value and volatility series as JSON files
with open('./abs_data.json', 'w') as out1:
out1.write(json.dumps(abs_json_data, indent=4, separators=(',', ':')))
with open('./vol_data.json', 'w') as out1:
out1.write(json.dumps(vol_json_data, indent=4, separators=(',', ':')))
if __name__ == "__main__":
main()
# main(sys.argv[1:])
| mit | 1,499,990,195,249,953,300 | 41.959459 | 108 | 0.439761 | false |
guillaume-philippon/aquilon | tests/broker/test_search_cluster.py | 1 | 11470 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the search cluster command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestSearchCluster(TestBrokerCommand):
def testarchetypeavailable(self):
command = "search cluster --archetype hacluster"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utvcs1", command)
self.matchclean(out, "utgrid1", command)
self.matchclean(out, "utstorage1", command)
def testarchetypeunavailable(self):
command = "search cluster --archetype archetype-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Archetype archetype-does-not-exist not found",
command)
def testbuildstatusavailable(self):
command = "search cluster --buildstatus build"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utvcs1", command)
self.matchoutput(out, "utstorage2", command)
self.matchoutput(out, "utgrid1", command)
self.matchclean(out, "utstorages2", command)
def testbuildstatusunavailable(self):
command = "search cluster --buildstatus status-does-not-exist"
out = self.badrequesttest(command.split(" "))
self.matchoutput(out, "Unknown cluster lifecycle 'status-does-not-exist'",
command)
def testclustertype(self):
command = "search cluster --cluster_type compute"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utvcs1", command)
self.matchoutput(out, "utgrid1", command)
self.matchclean(out, "utstorage1", command)
def testclusterandarchetype(self):
command = ['search', 'cluster', '--archetype', 'utarchetype1',
'--cluster_type', 'compute']
self.notfoundtest(command)
def testpersonalityavailable(self):
command = "search cluster --personality metrocluster"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utstorage1", command)
self.matchoutput(out, "utstorage2", command)
def testpersonalityavailable2(self):
command = ['search', 'cluster', '--archetype', 'storagecluster',
'--personality', 'metrocluster']
out = self.commandtest(command)
self.matchoutput(out, "utstorage1", command)
self.matchoutput(out, "utstorage2", command)
self.matchclean(out, "utgrid1", command)
def testpersonalityunavailable(self):
command = ['search', 'cluster', '--archetype', 'storagecluster',
'--personality', 'personality-does-not-exist']
out = self.notfoundtest(command)
self.matchoutput(out, "Personality personality-does-not-exist, "
"archetype storagecluster not found.", command)
def testpersonalityunavailable2(self):
command = "search cluster --personality personality-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Personality personality-does-not-exist "
"not found.", command)
def testsandboxavailable(self):
command = ["search_cluster", "--sandbox=%s/utsandbox" % self.user]
out = self.commandtest(command)
self.matchoutput(out, "utstorages2", command)
self.matchclean(out, "utstorage2", command)
def testdomainavailable(self):
command = ["search_cluster", "--domain=unittest"]
out = self.commandtest(command)
self.matchoutput(out, "utvcs1", command)
self.matchoutput(out, "utgrid1", command)
self.matchoutput(out, "utstorage1", command)
self.matchoutput(out, "utstorage2", command)
self.matchclean(out, "utstorages2", command)
def testdomainunavailable(self):
command = ["search_cluster", "--domain=domain-does-not-exist"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Domain domain-does-not-exist not found.", command)
def testclusterlocationavailable(self):
command = "search cluster --cluster_building ut"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utvcs1", command)
self.matchoutput(out, "utgrid1", command)
self.matchoutput(out, "utstorage1", command)
self.matchoutput(out, "utstorage2", command)
self.matchclean(out, "utstorages2", command) # bu
def testclusterlocationunavailable(self):
command = ["search_cluster",
"--cluster_building=building-does-not-exist"]
out = self.notfoundtest(command)
self.matchoutput(out, "Building building-does-not-exist not found",
command)
def testallowedpersonalityavailable(self):
command = "search cluster --allowed_personality vulcan-10g-server-prod"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utecl1", command)
# No Personality.get_unique in the code.
def testallowedpersonalityunavailable(self):
command = ['search', 'cluster',
'--allowed_personality', 'personality-does-not-exist']
self.notfoundtest(command)
def testallowedarchetypeavailable(self):
command = "search cluster --allowed_archetype vmhost"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utecl1", command)
def testallowedarchetypeunavailable(self):
command = "search cluster --allowed_archetype archetype-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out,
"Archetype archetype-does-not-exist not found.",
command)
def testdownhoststhreshold(self):
command = "search cluster --down_hosts_threshold 2"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utecl1", command) # 2
self.matchoutput(out, "utecl3", command) # 2
self.matchclean(out, "utecl2", command) # 1
self.matchclean(out, "utgrid1", command) # 5%
self.matchclean(out, "utstorage1", command) # 1
def testdownhoststhresholdpercent(self):
command = "search cluster --down_hosts_threshold 5%"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utgrid1", command) # 5%
self.matchclean(out, "utecl2", command) # 1
self.matchclean(out, "utecl3", command) # 2
def testdownmaintthreshold(self):
command = "search cluster --down_maint_threshold 1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utstorage1", command) # 1
self.matchclean(out, "utvcs1", command) # None
self.matchclean(out, "utgrid1", command) # 0
def testmaxmembers(self):
command = "search cluster --max_members 2"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utvcs1", command)
self.matchclean(out, "utgrid1", command) # 2000
self.matchoutput(out, "utstorage1", command)
def testmemberarchetype(self):
command = "search cluster --member_archetype vmhost"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utecl1", command)
self.matchclean(out, "utgrid1", command)
self.matchclean(out, "utstorage1", command)
def testmemberarchetypeunavailable(self):
command = "search cluster --member_archetype archetype-does-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out,
"Archetype archetype-does-not-exist not found.",
command)
def testmemberpersonalityandarchetypeunavailable(self):
command = ['search', 'cluster', '--member_archetype', 'filer',
'--member_personality', 'vulcan-10g-server-prod']
out = self.notfoundtest(command)
self.matchoutput(out,
"Personality vulcan-10g-server-prod, archetype filer not found.",
command)
def testmemberpersonality(self):
command = "search cluster --member_personality vulcan-10g-server-prod"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utecl1", command)
self.matchclean(out, "utgrid1", command)
self.matchclean(out, "utstorage1", command)
# No Personality.get_unique in the code.
def testmemberpersonalityunavailable(self):
command = ['search', 'cluster',
'--member_personality', 'personality-does-not-exist']
self.notfoundtest(command)
# based on testvmhostlocationbuilding, to see that member_ location works
def testvmhostlocationbuilding(self):
command = "search cluster --member_building ut"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utecl1", command)
self.matchoutput(out, "utecl2", command)
self.matchclean(out, "utecl3", command)
self.matchclean(out, "utecl4", command)
def testmemberlocationlist(self):
command = ["search_cluster", "--member_building", "utb1,utb3"]
out = self.commandtest(command)
self.matchoutput(out, "utbvcs4a", command)
self.matchoutput(out, "utbvcs4b", command)
self.matchoutput(out, "utbvcs5c", command)
self.matchclean(out, "utbvcs1", command)
self.matchclean(out, "utbvcs2", command)
self.matchclean(out, "utecl1", command)
self.matchclean(out, "utgrid1", command)
self.matchclean(out, "utstorage1", command)
def testmemberlocationlistempty(self):
command = ["search_cluster", "--member_building", "ut,np"]
self.noouttest(command)
def testshareavailable(self):
command = "search cluster --share test_share_2"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "utecl1", command)
self.matchclean(out, "utecl2", command)
self.matchclean(out, "utecl3", command)
self.matchclean(out, "utecl4", command)
def testshareunavailable(self):
command = ['search', 'cluster', '--share', 'share-does-not-exist']
out = self.notfoundtest(command)
self.matchoutput(out,
"Share share-does-not-exist not found.", command)
def testsearchmetaclustershare(self):
self.noouttest(["search_cluster", "--share", "test_v2_share"])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSearchCluster)
unittest.TextTestRunner(verbosity=5).run(suite)
| apache-2.0 | 6,503,041,860,555,801,000 | 40.709091 | 90 | 0.641238 | false |
apahim/avocado-misc-tests | fuzz/trinity.py | 1 | 3274 | #!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2016 IBM
# Author: Praveen K Pandey <[email protected]>
#
import os
import re
from avocado import Test
from avocado import main
from avocado.utils import archive, build, process
from avocado.utils.software_manager import SoftwareManager
class Trinity(Test):
"""
    This test suite tests syscalls by invoking random system calls
    with a varying number of arguments.
"""
def setUp(self):
'''
Build Trinity
Source:
https://github.com/kernelslacker/trinity
'''
"""
Add not root user
"""
if process.system('getent group trinity', ignore_status=True):
process.run('groupadd trinity', sudo=True)
if process.system('getent passwd trinity', ignore_status=True):
process.run(
'useradd -g trinity -m -d /home/trinity trinity', sudo=True)
process.run('usermod -a -G trinity trinity', sudo=True)
smm = SoftwareManager()
for package in ("gcc", "make"):
if not smm.check_installed(package) and not smm.install(package):
self.error(
"Fail to install %s required for this test." % package)
locations = ["https://github.com/kernelslacker/trinity/archive/"
"master.zip"]
tarball = self.fetch_asset("trinity.zip", locations=locations,
expire='7d')
archive.extract(tarball, self.srcdir)
self.srcdir = os.path.join(self.srcdir, 'trinity-master')
os.chdir(self.srcdir)
process.run('chmod -R +x ' + self.srcdir)
process.run('./configure', shell=True)
build.make('.')
process.run('touch trinity.log')
process.run('cp -r ' + self.srcdir + ' /home/trinity')
self.srcdir = os.path.join('/home/trinity', 'trinity-master')
process.run('chown -R trinity:trinity ' + self.srcdir)
def test(self):
'''
Trinity need to run as non root user
'''
args = self.params.get('runarg', default=' ')
process.system('su - trinity -c " %s %s %s"' %
(os.path.join(self.srcdir, 'trinity'), args,
'-N 1000000'), shell=True)
dmesg = process.system_output('dmesg')
# verify if system having issue after fuzzer run
match = re.search(r'unhandled', dmesg, re.M | re.I)
if match:
self.log.info("Testcase failure as segfault")
match = re.search(r'Call Trace:', dmesg, re.M | re.I)
if match:
self.log.info("some call traces seen please check")
def tearDown(self):
process.system('userdel -r trinity', sudo=True)
if __name__ == "__main__":
main()
| gpl-2.0 | -5,437,035,175,734,282,000 | 30.786408 | 78 | 0.603849 | false |
douglask3/allocationModules | runGday/EucFace/scripts/translate_GDAY_output_to_EUCFACE_format.py | 1 | 15762 | #!/usr/bin/env python
# coding: utf-8
""" Translate GDAY output file
Match the NCEAS format and, while we are at it, carry out unit conversions so
that we match the required standard. Data should be comma-delimited.
"""
import shutil
import os
import numpy as np
import csv
import sys
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pd
from cStringIO import StringIO
__author__ = "Martin De Kauwe"
__version__ = "1.0 (06.04.2011)"
__email__ = "[email protected]"
def date_converter(*args):
return dt.datetime.strptime(str(int(float(args[0]))) + " " +\
str(int(float(args[1]))), '%Y %j')
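# Illustrative example: date_converter('2005.0', '32.0') parses year 2005,
# day-of-year 32 and returns datetime.datetime(2005, 2, 1, 0, 0).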
def translate_output(infname, met_fname):
outdir = "../outputs"
UNDEF = -9999.
units = setup_units()
variable, variable_names = setup_varnames()
# load met stuff, i.e. the stuff needed for NCEAS output that G'day
# does not output
envir = load_met_input_data(met_fname)
# load the rest of the g'day output
(gday, git_ver) = load_gday_output(infname)
# merge dictionaries to ease output
data_dict = dict(envir, **gday)
ofname = os.path.join(outdir, "temp.nceas")
f = open(ofname, "w")
f.write("%s," % (git_ver))
# write output in csv format
writer = csv.writer(f, dialect=csv.excel)
# write header for csv file
writer.writerow(variable)
writer.writerow(units)
writer.writerow(variable_names)
for i in xrange(len(gday['DOY'])):
writer.writerow([("%.8f" % (float(data_dict[k][i])) \
if data_dict.has_key(k) else UNDEF)
for k in variable_names])
# Need to replace the temp file with the infname which is actually
# the filename we want to use
shutil.move(ofname, infname)
def remove_comments_from_header(fname):
""" I have made files with comments which means the headings can't be
parsed to get dictionary headers for pandas! Solution is to remove these
comments first """
s = StringIO()
with open(fname) as f:
for line in f:
if '#' in line:
line = line.replace("#", "").lstrip(' ')
s.write(line)
s.seek(0) # "rewind" to the beginning of the StringIO object
return s
def remove_comments_from_header_and_get_git_rev(fname):
""" I have made files with comments which means the headings can't be
parsed to get dictionary headers for pandas! Solution is to remove these
comments first """
s = StringIO()
with open(fname) as f:
line_counter = 0
for line in f:
if line_counter == 0:
git_ver = line.rstrip(' ')
if '#' in line:
line = line.replace("#", "").lstrip(' ')
s.write(line)
line_counter += 1
s.seek(0) # "rewind" to the beginning of the StringIO object
return s, git_ver
def load_met_input_data(fname):
MJ_TO_MOL = 4.6
SW_TO_PAR = 0.48
DAYS_TO_HRS = 24.0
UMOL_TO_MOL = 1E-6
tonnes_per_ha_to_g_m2 = 100.0
s = remove_comments_from_header(fname)
met_data = pd.read_csv(s, parse_dates=[[0,1]], skiprows=4, index_col=0,
sep=",", keep_date_col=True,
date_parser=date_converter)
precip = met_data["rain"]
#par = met_data[:,1] * MJ_TO_MOL * SW_TO_PAR
par = met_data["par"] * UMOL_TO_MOL
air_temp = met_data["tair"]
soil_temp = met_data["tsoil"]
vpd = met_data["vpd_avg"]
co2 = met_data["co2"]
ndep = met_data["ndep"] * tonnes_per_ha_to_g_m2
return {'CO2': co2, 'PPT':precip, 'PAR':par, 'AT':air_temp, 'ST':soil_temp,
'VPD':vpd, 'NDEP':ndep}
def load_gday_output(fname):
SW_RAD_TO_PAR = 2.3
UNDEF = -9999.
tonnes_per_ha_to_g_m2 = 100
yr_to_day = 365.25
(s, git_ver) = remove_comments_from_header_and_get_git_rev(fname)
out = pd.read_csv(s, parse_dates=[[0,1]], skiprows=1, index_col=0,
sep=",", keep_date_col=True, date_parser=date_converter)
year = out["year"]
doy = out["doy"]
# state outputs
pawater_root = out["pawater_root"]
shoot = out["shoot"] * tonnes_per_ha_to_g_m2
stem = out["stem"] * tonnes_per_ha_to_g_m2
branch = out["branch"] * tonnes_per_ha_to_g_m2
fine_root = out["root"] * tonnes_per_ha_to_g_m2
coarse_root = out["croot"] * tonnes_per_ha_to_g_m2
coarse_rootn = out["crootn"] * tonnes_per_ha_to_g_m2
litterc = out["litterc"] * tonnes_per_ha_to_g_m2
littercag = out["littercag"] * tonnes_per_ha_to_g_m2
littercbg = out["littercbg"] * tonnes_per_ha_to_g_m2
soilc = out["soilc"] * tonnes_per_ha_to_g_m2
lai = out["lai"]
shootn = out["shootn"] * tonnes_per_ha_to_g_m2
stemn = out["stemn"] * tonnes_per_ha_to_g_m2
branchn = out["branchn"] * tonnes_per_ha_to_g_m2
rootn = out["rootn"] * tonnes_per_ha_to_g_m2
crootn = out["crootn"] * tonnes_per_ha_to_g_m2
litternag = out["litternag"] * tonnes_per_ha_to_g_m2
litternbg = out["litternbg"] * tonnes_per_ha_to_g_m2
nsoil = out["soiln"] * tonnes_per_ha_to_g_m2
inorgn = out["inorgn"] * tonnes_per_ha_to_g_m2
tnc = out["cstore"] * tonnes_per_ha_to_g_m2
nstorage = out["nstore"] * tonnes_per_ha_to_g_m2
activesoiln = out["activesoiln"] * tonnes_per_ha_to_g_m2
slowsoiln = out["slowsoiln"] * tonnes_per_ha_to_g_m2
passivesoiln = out["passivesoiln"] * tonnes_per_ha_to_g_m2
npoolo = activesoiln + slowsoiln + passivesoiln
# fluxes outputs
beta = out["wtfac_root"]
nep = out["nep"] * tonnes_per_ha_to_g_m2
gpp = out["gpp"] * tonnes_per_ha_to_g_m2
npp = out["npp"] * tonnes_per_ha_to_g_m2
rh = out["hetero_resp"] * tonnes_per_ha_to_g_m2
ra = out["auto_resp"] * tonnes_per_ha_to_g_m2
et = out["et"] # mm of water' are same value as kg/m2
trans = out["transpiration"] # mm of water' are same value as kg/m2
soil_evap = out["soil_evap"] # mm of water' are same value as kg/m2
can_evap = out["interception"] # mm of water' are same value as kg/m2
runoff = out["runoff"] # mm of water' are same value as kg/m2
gl = out["cpleaf"] * tonnes_per_ha_to_g_m2
# gw summed from cpstem and cpbranch below
cpstem = out["cpstem"] * tonnes_per_ha_to_g_m2
cpbranch = out["cpbranch"] * tonnes_per_ha_to_g_m2
gr = out["cproot"] * tonnes_per_ha_to_g_m2
gcr = out["cpcroot"] * tonnes_per_ha_to_g_m2
deadleaves = out["deadleaves"] * tonnes_per_ha_to_g_m2
deadroots = out["deadroots"] * tonnes_per_ha_to_g_m2
deadcroots = out["deadcroots"] * tonnes_per_ha_to_g_m2
deadbranch = out["deadbranch"] * tonnes_per_ha_to_g_m2
deadstems = out["deadstems"] * tonnes_per_ha_to_g_m2
deadleafn = out["deadleafn"] * tonnes_per_ha_to_g_m2
deadbranchn = out["deadbranchn"] * tonnes_per_ha_to_g_m2
deadstemn = out["deadstemn"] * tonnes_per_ha_to_g_m2
deadrootn = out["deadrootn"] * tonnes_per_ha_to_g_m2
deadcrootn = out["deadcrootn"] * tonnes_per_ha_to_g_m2
nup = out["nuptake"] * tonnes_per_ha_to_g_m2
ngross = out["ngross"] * tonnes_per_ha_to_g_m2
nmin = out["nmineralisation"] * tonnes_per_ha_to_g_m2
npleaf = out["npleaf"] * tonnes_per_ha_to_g_m2
nproot = out["nproot"] * tonnes_per_ha_to_g_m2
npcroot = out["npcroot"] * tonnes_per_ha_to_g_m2
npstemimm = out["npstemimm"] * tonnes_per_ha_to_g_m2
npstemmob = out["npstemmob"] * tonnes_per_ha_to_g_m2
npbranch = out["npbranch"] * tonnes_per_ha_to_g_m2
UMOL_TO_MOL = 1E-6
MOL_TO_MJ = 1.0 / 4.6
convx = UMOL_TO_MOL * MOL_TO_MJ
apar = out["apar"] * convx
gcd = out["gs_mol_m2_sec"]
ga = out["ga_mol_m2_sec"]
nleach = out["nloss"] * tonnes_per_ha_to_g_m2
cfretransn = out["leafretransn"] * tonnes_per_ha_to_g_m2
# Misc stuff we don't output
drainage = [UNDEF] * len(doy)
rleaf = [UNDEF] * len(doy)
rwood = [UNDEF] * len(doy)
rgrow = [UNDEF] * len(doy)
rsoil = [UNDEF] * len(doy)
cex = [UNDEF] * len(doy)
cvoc = [UNDEF] * len(doy)
lh = [UNDEF] * len(doy)
sh = [UNDEF] * len(doy)
ccoarse_lit = [UNDEF] * len(doy)
ndw = [UNDEF] * len(doy)
nfix = [UNDEF] * len(doy)
nvol = [UNDEF] * len(doy)
gb = [UNDEF] * len(doy)
grepr = [UNDEF] * len(doy)
cwretransn = [UNDEF] * len(doy)
ccrretransn = [UNDEF] * len(doy)
cfrretransn = [UNDEF] * len(doy)
# Misc calcs from fluxes/state
lma = shoot / lai
ncon = shootn / shoot
recosys = rh + ra
cw = stem + branch
gw = cpstem + cpbranch
cwn = stemn + branchn
cwin = deadstems + deadbranch
ccrlin = deadcroots
cfrlin = deadroots
ndeadwood = deadbranchn + deadstemn
nwood_growth = npstemimm + npstemmob + npbranch
return {'YEAR':year, 'DOY':doy, 'SW':pawater_root,
'NEP':nep, 'GPP':gpp, 'NPP':npp, 'CEX':cex, 'CVOC':cvoc,
'RECO':recosys, 'RAUTO':ra, 'RLEAF':rleaf, 'RWOOD':rwood,
'RGROW':rgrow, 'RHET':rh, 'RSOIL':rsoil, 'ET':et, 'T':trans,
'ES':soil_evap, 'EC':can_evap, 'RO':runoff, 'DRAIN':drainage,
'LE':lh, 'SH':sh, 'CL':shoot, 'CW':cw, 'CCR':coarse_root,
'CFR':fine_root, 'TNC':tnc, 'CFLIT':litterc, 'CFLITA':littercag,
'CFLITB':littercbg, 'CCLITB':ccoarse_lit, 'CSOIL':soilc,
'GL':gl, 'GW':gw, 'GCR':gcr, 'GR':gr, 'GREPR':grepr, 'CLLFALL':deadleaves,
'CCRLIN':ccrlin, 'CFRLIN':cfrlin, 'CWIN':cwin, 'LAI':lai, 'LMA':lma, 'NCON':ncon,
'NCAN':shootn, 'NWOOD':cwn, 'NCR':coarse_rootn, 'NFR':rootn,
'NSTOR':nstorage, 'NLIT':litternag, 'NRLIT':litternbg, 'NDW':ndw,
'NSOIL':nsoil, 'NPOOLM':inorgn, 'NPOOLO':npoolo, 'NFIX':nfix,
'NLITIN':deadleafn, 'NWLIN':ndeadwood, 'NCRLIN':deadcrootn,
'NFRLIN':deadrootn, 'NUP':nup,
'NGMIN':ngross, 'NMIN':nmin, 'NVOL': nvol, 'NLEACH':nleach,
'NGL':npleaf, 'NGW':nwood_growth, 'NGCR':npcroot, 'NGR':nproot,
'APARd':apar, 'GCd':gcd, 'GAd':ga, 'Gbd':gb, 'Betad':beta,
'NLRETRANS':cfretransn, 'NWRETRANS':cwretransn,
'NCRRETRANS':ccrretransn, 'NFRRETRANS':cfrretransn}, git_ver
def setup_units():
units = ['--','--','Mean ppm', 'PPT', 'mol m-2', 'Mean DegC', 'Mean DegC',
'kPa h', 'mm', 'gN m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'kgH2O m-2 d-1',
'kgH2O m-2 d-1', 'kgH2O m-2 d-1', 'kgH2O m-2 d-1',
'kgH2O m-2 d-1', 'kgH2O m-2 d-1', 'MJ m-2', 'MJ m-2',
'gC m-2', 'gC m-2', 'gC m-2', 'gC m-2', 'gC m-2', 'gC m-2',
'gC m-2', 'gC m-2', 'gC m-2', 'gC m-2 0 to 30 cm',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1', 'gC m-2 d-1',
'gC m-2 d-1', 'm2 m-2', 'gC m-2',
'gN gd.m.-1', 'gN m-2', 'gN m-2', 'gN m-2', 'gN m-2', 'gN m-2',
'gN m-2', 'gN m-2', 'gN m-2', 'gN m-2 0 to 30 cm',
'gN m-2 0 to 30 cm', 'gN m-2 0 to 30 cm', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1', 'gN m-2 d-1',
'gN m-2 d-1',
'MJ m-2 d-1', 'mol H2O m-2 s-1', 'mol H2O m-2 s-1',
'mol H2O m-2 s-1', 'frac', 'gN m-2 d-1', 'gN m-2 d-1',
'gN m-2 d-1', 'gN m-2 d-1']
return units
def setup_varnames():
variable = ['Year', 'Day of the year', 'CO2', 'Precipitation', 'PAR',
'Air temp canopy', 'Soil temp 10 cm', 'Vapour Pres Def',
'Total soil water content', 'N deposition', 'Net Eco Prod',
'Gross Prim Prod', 'Net Prim Prod', 'C exudation',
'C VOC Flux', 'Resp ecosystem', 'Resp autotrophic',
'Resp leaves (maint)', 'Resp Wood (maint)',
'Resp Fine Root (maint)', 'Resp growth',
'Resp heterotrophic', 'Resp from soil',
'Evapotranspiration', 'Transpiration', 'Soil Evaporation',
'Canopy evaporation', 'Runoff', 'Drainage', 'Latent Energy',
'Sensible Heat', 'C Leaf Mass', 'C Wood Mass',
'C Coarse Root mass', 'C Fine Root mass',
'C Storage as TNC', 'C Fine Litter Total',
'C Fine Litter above', 'C Fine Litter below',
'C Coarse Litter', 'C Soil', 'C Leaf growth',
'C Wood growth', 'C Coarse Root growth',
'C Fine Root growth', 'C reproduction growth',
'C Leaf Litterfall',
'C Coarse Root litter inputs', 'C Fine Root litter inputs',
'C Wood/branch inputs',
'LAI projected', 'Leaf gC/leaf area', 'N Conc Leaves',
'N Mass Leaves', 'N Mass Wood', 'N Mass Coarse Roots',
'N Mass Fine Roots', 'N storage', 'N litter aboveground',
'N litter belowground', 'N Dead wood', 'N Soil Total',
'N in Mineral form', 'N in Organic form', 'N fixation',
'N Leaf Litterfall', 'N Wood/brch litterfall',
'N Coarse Root litter input', 'N Fine Root litter input',
'N Biomass Uptake',
'N Gross Mineralization', 'N Net mineralization',
'N Volatilization', 'N Leaching', 'N Leaf growth',
'N Wood growth', 'N CR growth', 'N Fine Root growth',
'Aborbed PAR', 'Average daytime canopy conductance',
'Average daytime aerodynamic conductance',
'Average daytime leaf boundary conductance',
'Soil moisture stress', 'Foliage retranslocation',
'Wood/Branch retranslocation', 'Coarse Root retranslocation',
'Fine Root retranslocation']
variable_names = ['YEAR', 'DOY', 'CO2', 'PPT', 'PAR', 'AT', 'ST', 'VPD',
'SW', 'NDEP', 'NEP', 'GPP', 'NPP', 'CEX', 'CVOC',
'RECO', 'RAUTO', 'RLEAF', 'RWOOD', 'RROOT', 'RGROW',
'RHET', 'RSOIL',
'ET', 'T', 'ES', 'EC', 'RO', 'DRAIN', 'LE', 'SH',
'CL', 'CW', 'CCR', 'CFR', 'TNC', 'CFLIT', 'CFLITA',
'CFLITB', 'CCLITB', 'CSOIL', 'GL', 'GW', 'GCR', 'GR',
'GREPR','CLLFALL', 'CCRLIN', 'CFRLIN','CWIN', 'LAI',
'LMA', 'NCON', 'NCAN', 'NWOOD', 'NCR', 'NFR', 'NSTOR',
'NLIT','NRLIT', 'NDW', 'NSOIL', 'NPOOLM', 'NPOOLO', 'NFIX',
'NLITIN', 'NWLIN', 'NCRLIN', 'NFRLIN','NUP', 'NGMIN', 'NMIN',
'NVOL', 'NLEACH', 'NGL', 'NGW', 'NGCR', 'NGR', 'APARd',
'GCd', 'GAd', 'GBd', 'Betad','NLRETRANS', 'NWRETRANS',
'NCRRETRANS', 'NFRRETRANS']
return variable, variable_names
if __name__ == "__main__":
fname = "dk_fixco2_fixndep_forest_equilib.out"
met_fname = "duke_equilibrium_metdata_fixndep_0.004_fixco2_270.gin"
translate_output(fname, met_fname)
| gpl-2.0 | -7,590,501,808,791,468,000 | 42.186301 | 93 | 0.532483 | false |
eHealthAfrica/LMIS | LMIS/cce/models.py | 1 | 3454 | """
 This module handles cold chain equipment (CCE) management concerns such as the
 temperature log, the problem log, the current facility the CCE is at, and the
 warehouse.
"""
#import django modules
from django.db import models
#import external modules
import reversion
from mptt.models import MPTTModel, TreeForeignKey
from model_utils import Choices
#import project app modules
from core.models import BaseModel, UnitOfMeasurement
from facilities.models import Facility
class StorageLocationType(MPTTModel, BaseModel):
"""
    StorageLocationType is used to group storage locations (both cold and dry).
"""
name = models.CharField(max_length=20, unique=True)
description = models.CharField(max_length=100, blank=True)
parent = TreeForeignKey('self', null=True, blank=True, related_name='sub_types')
def __str__(self):
return '{name}'.format(name=self.name)
class StorageLocation(MPTTModel, BaseModel):
"""
StorageLocation is used to model physical or virtual places where products are stored
"""
STATUS = Choices((0, 'working', ('Working')), (1, 'not_working', ('Not Working')), (2, 'in_repair', ('In Repair')),)
code = models.CharField(max_length=55, unique=True)
name = models.CharField(max_length=55, unique=True)
facility = models.ForeignKey(Facility)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children_storage_location')
gross_capacity = models.FloatField(blank=True, null=True)
net_capacity = models.FloatField(blank=True, null=True)
capacity_uom = models.ForeignKey(UnitOfMeasurement, related_name='capacity_uom', null=True, blank=True)
type = models.ForeignKey(StorageLocationType)
is_cold_store = models.BooleanField()
minimum_temperature = models.FloatField(blank=True, null=True, verbose_name='min_temp.')
maximum_temperature = models.FloatField(blank=True, null=True, verbose_name='max_temp.')
temperature_uom = models.ForeignKey(UnitOfMeasurement, related_name='temp_uom', blank=True, null=True)
status = models.IntegerField(choices=STATUS)
def __str__(self):
return '{code}-{name}'.format(code=self.code, name=self.name)
class StorageLocationTempLog(BaseModel):
"""
    Used to keep storage locations' temperature logs, especially for cold chain types
"""
temperature = models.FloatField()
temperature_uom = models.ForeignKey(UnitOfMeasurement)
storage_location = models.ForeignKey(StorageLocation)
date_time_logged = models.DateTimeField()
def __str__(self):
return '{storage_loc_code}-{temp}'.format(storage_loc_code=self.storage_location.code, temp=self.temperature)
class StorageLocationProblemLog(BaseModel):
"""
This model is used to keep problem log for storage locations
"""
storage_location = models.ForeignKey(StorageLocation)
description = models.CharField(max_length=200, blank=True)
start_date = models.DateField()
fixed_date = models.DateField(blank=True, null=True)
def __str__(self):
return '{storage_loc_name}-{description}'.format(storage_loc_name=self.storage_location.name,
description=self.description)
#Register models that will be tracked with reversion
reversion.register(StorageLocationType)
reversion.register(StorageLocation)
reversion.register(StorageLocationTempLog)
reversion.register(StorageLocationProblemLog) | gpl-2.0 | -2,208,780,214,593,822,000 | 39.647059 | 120 | 0.720324 | false |
jmcarp/marshmallow | marshmallow/utils.py | 1 | 9397 | # -*- coding: utf-8 -*-
"""Utility methods for marshmallow."""
from __future__ import absolute_import
import json
import datetime
import time
import inspect
from email.utils import formatdate, parsedate
from calendar import timegm
import types
from decimal import Decimal, Context, Inexact
from pprint import pprint as py_pprint
dateutil_available = False
try:
from dateutil import parser
dateutil_available = True
except ImportError:
dateutil_available = False
from marshmallow.compat import basestring, OrderedDict, binary_type, text_type
def is_generator(obj):
"""Return True if ``obj`` is a generator
"""
return inspect.isgeneratorfunction(obj) or inspect.isgenerator(obj)
def is_iterable_but_not_string(obj):
"""Return True if ``obj`` is an iterable object that isn't a string."""
return (hasattr(obj, "__iter__") and not hasattr(obj, "strip")) or is_generator(obj)
def is_indexable_but_not_string(obj):
"""Return True if ``obj`` is indexable but isn't a string."""
return not hasattr(obj, "strip") and hasattr(obj, "__getitem__")
def is_collection(obj):
"""Return True if ``obj`` is a collection type, e.g list, tuple, queryset.
"""
return is_iterable_but_not_string(obj) and not isinstance(obj, dict)
def is_instance_or_subclass(val, class_):
'''Return True if ``val`` is either a subclass or instance of ``class_``.
'''
try:
return issubclass(val, class_)
except TypeError:
return isinstance(val, class_)
def is_keyed_tuple(obj):
"""Return True if ``obj`` has keyed tuple behavior, such as
namedtuples or SQLAlchemy's KeyedTuples.
"""
return isinstance(obj, tuple) and hasattr(obj, '_fields')
def float_to_decimal(f):
"""Convert a floating point number to a Decimal with no loss of information.
See: http://docs.python.org/release/2.6.7/library/decimal.html#decimal-faq
"""
n, d = f.as_integer_ratio()
numerator, denominator = Decimal(n), Decimal(d)
ctx = Context(prec=60)
result = ctx.divide(numerator, denominator)
while ctx.flags[Inexact]:
ctx.flags[Inexact] = False
ctx.prec *= 2
result = ctx.divide(numerator, denominator)
return result
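# For instance, float_to_decimal(0.1) returns the exact value of the binary
# float, Decimal('0.1000000000000000055511151231257827021181583404541015625'),
# rather than the rounded Decimal('0.1').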
def to_marshallable_type(obj, field_names=None):
"""Helper for converting an object to a dictionary only if it is not
dictionary already or an indexable object nor a simple type"""
if obj is None:
return None # make it idempotent for None
if hasattr(obj, '__marshallable__'):
return obj.__marshallable__()
if hasattr(obj, '__getitem__') and not is_keyed_tuple(obj):
return obj # it is indexable it is ok
if isinstance(obj, types.GeneratorType):
return list(obj)
if field_names:
# exclude field names that aren't actual attributes of the object
attrs = set(dir(obj)) & set(field_names)
else:
attrs = set(dir(obj))
return dict([(attr, getattr(obj, attr, None)) for attr in attrs
if not attr.startswith("__") and not attr.endswith("__")])
def pprint(obj, *args, **kwargs):
"""Pretty-printing function that can pretty-print OrderedDicts
like regular dictionaries. Useful for printing the output of
:meth:`marshmallow.Serializer.dump`.
"""
if isinstance(obj, OrderedDict):
print(json.dumps(obj, *args, **kwargs))
else:
py_pprint(obj, *args, **kwargs)
# From pytz: http://pytz.sourceforge.net/
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
class UTC(datetime.tzinfo):
"""UTC
Optimized UTC implementation. It unpickles using the single module global
instance defined beneath this class declaration.
"""
zone = "UTC"
_utcoffset = ZERO
_dst = ZERO
_tzname = zone
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(utc.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
UTC = utc = UTC() # UTC is a singleton
def local_rfcformat(dt):
"""Return the RFC822-formatted representation of a timezone-aware datetime
with the UTC offset.
"""
weekday = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"][dt.weekday()]
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
"Oct", "Nov", "Dec"][dt.month - 1]
tz_offset = dt.strftime("%z")
return "%s, %02d %s %04d %02d:%02d:%02d %s" % (weekday, dt.day, month,
dt.year, dt.hour, dt.minute, dt.second, tz_offset)
def rfcformat(dt, localtime=False):
"""Return the RFC822-formatted representation of a datetime object.
:param datetime dt: The datetime.
:param bool localtime: If ``True``, return the date relative to the local
timezone instead of UTC, displaying the proper offset,
e.g. "Sun, 10 Nov 2013 08:23:45 -0600"
"""
if not localtime:
return formatdate(timegm(dt.utctimetuple()))
else:
return local_rfcformat(dt)
def isoformat(dt, localtime=False, *args, **kwargs):
"""Return the ISO8601-formatted UTC representation of a datetime object.
"""
if localtime and dt.tzinfo is not None:
localized = dt
else:
if dt.tzinfo is None:
localized = UTC.localize(dt)
else:
localized = dt.astimezone(UTC)
return localized.isoformat(*args, **kwargs)
def from_datestring(datestring):
"""Parse an arbitrary datestring and return a datetime object using
dateutils' parser.
"""
if dateutil_available:
return parser.parse(datestring)
else:
        raise RuntimeError('from_datestring requires the python-dateutil library '
                           'to be installed.')
def from_rfc(datestring, use_dateutil=True):
"""Parse a RFC822-formatted datetime string and return a datetime object.
Use dateutil's parser if possible.
https://stackoverflow.com/questions/885015/how-to-parse-a-rfc-2822-date-time-into-a-python-datetime
"""
# Use dateutil's parser if possible
if dateutil_available and use_dateutil:
return parser.parse(datestring)
else:
parsed = parsedate(datestring) # as a tuple
timestamp = time.mktime(parsed)
return datetime.datetime.fromtimestamp(timestamp)
def from_iso(datestring, use_dateutil=True):
"""Parse an ISO8601-formatted datetime string and return a datetime object.
Use dateutil's parser if possible and return a timezone-aware datetime.
"""
# Use dateutil's parser if possible
if dateutil_available and use_dateutil:
return parser.parse(datestring)
else:
# Strip off timezone info.
return datetime.datetime.strptime(datestring[:19], '%Y-%m-%dT%H:%M:%S')
def from_iso_time(timestring, use_dateutil=True):
"""Parse an ISO8601-formatted datetime string and return a datetime.time
object.
"""
if dateutil_available and use_dateutil:
return parser.parse(timestring).time()
else:
if len(timestring) > 8: # has microseconds
fmt = '%H:%M:%S.%f'
else:
fmt = '%H:%M:%S'
return datetime.datetime.strptime(timestring, fmt).time()
def from_iso_date(datestring, use_dateutil=True):
if dateutil_available and use_dateutil:
return parser.parse(datestring).date()
else:
return datetime.datetime.strptime(datestring[:10], '%Y-%m-%d')
def ensure_text_type(val):
if isinstance(val, binary_type):
val = val.decode('utf-8')
return text_type(val)
def pluck(dictlist, key):
"""Extracts a list of dictionary values from a list of dictionaries.
::
>>> dlist = [{'id': 1, 'name': 'foo'}, {'id': 2, 'name': 'bar'}]
>>> pluck(dlist, 'id')
[1, 2]
"""
return [d[key] for d in dictlist]
# Various utilities for pulling keyed values from objects
def get_value(key, obj, default=None):
"""Helper for pulling a keyed value off various types of objects"""
if type(key) == int:
return _get_value_for_key(key, obj, default)
else:
return _get_value_for_keys(key.split('.'), obj, default)
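# Illustrative example: a dotted key walks nested attributes/items, so
# get_value('author.name', {'author': {'name': 'Ann'}}) returns 'Ann', while an
# integer key indexes sequences directly.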
def _get_value_for_keys(keys, obj, default):
if len(keys) == 1:
return _get_value_for_key(keys[0], obj, default)
else:
return _get_value_for_keys(
keys[1:], _get_value_for_key(keys[0], obj, default), default)
def _get_value_for_key(key, obj, default):
if isinstance(key, basestring) and hasattr(obj, key):
return getattr(obj, key)
if is_indexable_but_not_string(obj):
try:
return obj[key]
except KeyError:
return default
return default
| mit | 7,883,791,458,795,774,000 | 30.323333 | 103 | 0.638927 | false |
savi-dev/nova | nova/virt/libvirt/driver.py | 1 | 124648 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
:libvirt_type: Libvirt domain type. Can be kvm, qemu, uml, xen
(default: kvm).
:libvirt_uri: Override for the default libvirt URI (depends on libvirt_type).
:libvirt_disk_prefix: Override the default disk prefix for the devices
attached to a server.
:rescue_image_id: Rescue ami image (None = original image).
:rescue_kernel_id: Rescue aki image (None = original image).
:rescue_ramdisk_id: Rescue ari image (None = original image).
:injected_network_template: Template file for injected network
:allow_same_net_traffic: Whether to allow in project network traffic
"""
import errno
import functools
import glob
import hashlib
import multiprocessing
import os
import shutil
import sys
import tempfile
import uuid
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from xml.dom import minidom
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import vm_mode
from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
from nova.image import glance
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import firewall
from nova.virt.libvirt import config
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
default=None,
help='Rescue ami image'),
cfg.StrOpt('rescue_kernel_id',
default=None,
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
default=None,
help='Rescue ari image'),
cfg.StrOpt('libvirt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen)'),
cfg.StrOpt('libvirt_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type)'),
cfg.BoolOpt('libvirt_inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('libvirt_inject_key',
default=True,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('libvirt_inject_partition',
default=1,
help='The partition to inject to : '
'-1 => inspect (libguestfs only), 0 => not partitioned, '
'>0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
default=None,
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('libvirt_vif_driver',
default='nova.virt.libvirt.vif.LibvirtBridgeDriver',
help='The libvirt VIF driver to configure the VIFs.'),
cfg.ListOpt('libvirt_volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver'
],
help='Libvirt handlers for remote volumes.'),
cfg.StrOpt('libvirt_disk_prefix',
default=None,
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on libvirt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('libvirt_wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.BoolOpt('libvirt_nonblocking',
default=True,
help='Use a separated OS thread pool to realize non-blocking'
' libvirt calls'),
# force_config_drive is a string option, to allow for future behaviors
# (e.g. use config_drive based on image properties)
cfg.StrOpt('force_config_drive',
default=None,
help='Set to force injection to take place on a config drive '
'(if set, valid options are: always)'),
cfg.StrOpt('libvirt_cpu_mode',
default=None,
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If libvirt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('libvirt_cpu_model',
default=None,
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'libvirt_cpu_mode="custom" and libvirt_type="kvm|qemu"'),
cfg.StrOpt('libvirt_snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(libvirt_opts)
flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
flags.DECLARE('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
MIN_LIBVIRT_VERSION = (0, 9, 6)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
def _get_eph_disk(ephemeral):
return 'disk.eph' + str(ephemeral['num'])
class LibvirtDriver(driver.ComputeDriver):
def __init__(self, read_only=False):
super(LibvirtDriver, self).__init__()
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
self._host_state = None
self._initiator = None
self._wrapped_conn = None
self.read_only = read_only
self.firewall_driver = firewall.load_driver(
default=DEFAULT_FIREWALL_DRIVER,
get_connection=self._get_connection)
self.vif_driver = importutils.import_object(FLAGS.libvirt_vif_driver)
self.volume_drivers = {}
for driver_str in FLAGS.libvirt_volume_drivers:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
self.volume_drivers[driver_type] = driver_class(self)
self._host_state = None
disk_prefix_map = {"lxc": "", "uml": "ubd", "xen": "sd"}
if FLAGS.libvirt_disk_prefix:
self._disk_prefix = FLAGS.libvirt_disk_prefix
else:
self._disk_prefix = disk_prefix_map.get(FLAGS.libvirt_type, 'vd')
self.default_root_device = self._disk_prefix + 'a'
self.default_second_device = self._disk_prefix + 'b'
self.default_third_device = self._disk_prefix + 'c'
self.default_last_device = self._disk_prefix + 'z'
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(FLAGS.use_cow_images)
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
            # provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(FLAGS.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
@property
def host_state(self):
if not self._host_state:
self._host_state = HostState(self.read_only)
return self._host_state
def has_min_version(self, ver):
libvirt_version = self._conn.getLibVersion()
def _munge_version(ver):
return ver[0] * 1000000 + ver[1] * 1000 + ver[2]
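        # e.g. (0, 9, 6) packs to 9006, matching the major * 1,000,000 +
        # minor * 1,000 + release encoding returned by getLibVersion().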
if libvirt_version < _munge_version(ver):
return False
return True
def init_host(self, host):
if not self.has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
micro = MIN_LIBVIRT_VERSION[2]
LOG.error(_('Nova requires libvirt version '
'%(major)i.%(minor)i.%(micro)i or greater.') %
locals())
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
LOG.debug(_('Connecting to libvirt: %s'), self.uri)
if not FLAGS.libvirt_nonblocking:
self._wrapped_conn = self._connect(self.uri,
self.read_only)
else:
self._wrapped_conn = tpool.proxy_call(
(libvirt.virDomain, libvirt.virConnect),
self._connect, self.uri, self.read_only)
return self._wrapped_conn
_conn = property(_get_connection)
def _test_connection(self):
try:
self._wrapped_conn.getCapabilities()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broke'))
return False
raise
@property
def uri(self):
if FLAGS.libvirt_type == 'uml':
uri = FLAGS.libvirt_uri or 'uml:///system'
elif FLAGS.libvirt_type == 'xen':
uri = FLAGS.libvirt_uri or 'xen:///'
elif FLAGS.libvirt_type == 'lxc':
uri = FLAGS.libvirt_uri or 'lxc:///'
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
return uri
@staticmethod
def _connect(uri, read_only):
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
LOG.warning(
_("Can not handle authentication request for %d credentials")
% len(creds))
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
_connect_auth_cb,
None]
if read_only:
return libvirt.openReadOnly(uri)
else:
return libvirt.openAuth(uri, auth, 0)
def get_num_instances(self):
"""Efficient override of base instance_exists method."""
return self._conn.numOfDomains()
def instance_exists(self, instance_id):
"""Efficient override of base instance_exists method."""
try:
self._lookup_by_name(instance_id)
return True
except exception.NovaException:
return False
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
return []
return self._conn.listDomainsID()
def list_instances(self):
names = []
for domain_id in self.list_instance_ids():
try:
# We skip domains with ID 0 (hypervisors).
if domain_id != 0:
domain = self._conn.lookupByID(domain_id)
names.append(domain.name())
except libvirt.libvirtError:
# Instance was deleted while listing... ignore it
pass
return names
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for (network, mapping) in network_info:
self.vif_driver.plug(instance, (network, mapping))
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
for (network, mapping) in network_info:
self.vif_driver.unplug(instance, (network, mapping))
def _destroy(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
if virt_dom is not None:
try:
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.SHUTDOWN:
is_okay = True
if not is_okay:
LOG.error(_("Error from libvirt during destroy. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
raise
def _wait_for_destroy():
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
state = self.get_info(instance)['state']
except exception.NotFound:
LOG.error(_("During wait destroy, instance disappeared."),
instance=instance)
raise utils.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_("Instance destroyed successfully."),
instance=instance)
raise utils.LoopingCallDone()
timer = utils.LoopingCall(_wait_for_destroy)
timer.start(interval=0.5).wait()
def destroy(self, instance, network_info, block_device_info=None):
self._destroy(instance)
self._cleanup(instance, network_info, block_device_info)
def _undefine_domain(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.NotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug(_("Error from libvirt during undefineFlags."
" Retrying with undefine"), instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.error(_("Error from libvirt during undefine. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
raise
def _cleanup(self, instance, network_info, block_device_info):
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
try:
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.error(_("Error from libvirt during unfilter. "
"Code=%(errcode)s Error=%(e)s") %
locals(), instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
target = os.path.join(FLAGS.instances_path, instance['name'])
LOG.info(_('Deleting instance files %(target)s') % locals(),
instance=instance)
if FLAGS.libvirt_type == 'lxc':
container_dir = os.path.join(FLAGS.instances_path,
instance['name'],
'rootfs')
disk.destroy_container(container_dir=container_dir)
if os.path.exists(target):
# If we fail to get rid of the directory
# tree, this shouldn't block deletion of
# the instance as whole.
try:
shutil.rmtree(target)
except OSError, e:
LOG.error(_("Failed to cleanup directory %(target)s: %(e)s") %
locals())
#NOTE(bfilippov): destroy all LVM disks for this instance
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object"""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object"""
if FLAGS.libvirt_images_volume_group:
vg = os.path.join('/dev', FLAGS.libvirt_images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance['name']
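            # The LVM image backend is assumed to name logical volumes
            # "<instance name>_<disk name>", e.g. "instance-00000001_disk" or
            # "instance-00000001_disk.local", which is what this prefix match
            # relies on.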
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = libvirt_utils.list_logical_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if not self._initiator:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.warn(_('Could not determine iscsi initiator name'),
instance=instance)
return {
'ip': FLAGS.my_ip,
'initiator': self._initiator,
'host': FLAGS.host
}
def _cleanup_resize(self, instance, network_info):
target = os.path.join(FLAGS.instances_path,
instance['name'] + "_resize")
if os.path.exists(target):
shutil.rmtree(target)
if instance['host'] != FLAGS.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(instance, network_info)
def volume_driver_method(self, method_name, connection_info,
*args, **kwargs):
driver_type = connection_info.get('driver_volume_type')
if not driver_type in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
driver = self.volume_drivers[driver_type]
method = getattr(driver, method_name)
return method(connection_info, *args, **kwargs)
@exception.wrap_exception()
def attach_volume(self, connection_info, instance_name, mountpoint):
virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
conf = self.volume_driver_method('connect_volume',
connection_info,
mount_device)
if FLAGS.libvirt_type == 'lxc':
self._attach_lxc_volume(conf.to_xml(), virt_dom, instance_name)
# TODO(danms) once libvirt has support for LXC hotplug,
# replace this re-define with use of the
# VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
# attachDevice()
domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
self._conn.defineXML(domxml)
else:
try:
flags = (libvirt.VIR_DOMAIN_AFFECT_CURRENT)
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception, ex:
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
raise exception.DeviceIsBusy(device=mount_device)
with excutils.save_and_reraise_exception():
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device"""
try:
doc = etree.fromstring(xml)
except Exception:
return None
ret = doc.findall('./devices/disk')
for node in ret:
for child in node.getchildren():
if child.tag == 'target':
if child.get('dev') == device:
return etree.tostring(node)
def _get_domain_xml(self, instance):
try:
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
instance_dir = os.path.join(FLAGS.instances_path,
instance['name'])
xml_path = os.path.join(instance_dir, 'libvirt.xml')
xml = libvirt_utils.load_file(xml_path)
return xml
@exception.wrap_exception()
def detach_volume(self, connection_info, instance_name, mountpoint):
mount_device = mountpoint.rpartition("/")[2]
try:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still logout even if
# the instance doesn't exist here anymore.
virt_dom = self._lookup_by_name(instance_name)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
raise exception.DiskNotFound(location=mount_device)
if FLAGS.libvirt_type == 'lxc':
self._detach_lxc_volume(xml, virt_dom, instance_name)
# TODO(danms) once libvirt has support for LXC hotplug,
# replace this re-define with use of the
# VIR_DOMAIN_AFFECT_LIVE & VIR_DOMAIN_AFFECT_CONFIG flags with
# detachDevice()
domxml = virt_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
self._conn.defineXML(domxml)
else:
flags = (libvirt.VIR_DOMAIN_AFFECT_CURRENT)
virt_dom.detachDeviceFlags(xml, flags)
finally:
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
@exception.wrap_exception()
def _attach_lxc_volume(self, xml, virt_dom, instance_name):
LOG.info(_('attaching LXC block device'))
lxc_container_root = self.get_lxc_container_root(virt_dom)
lxc_host_volume = self.get_lxc_host_device(xml)
lxc_container_device = self.get_lxc_container_target(xml)
lxc_container_target = "%s/%s" % (lxc_container_root,
lxc_container_device)
if lxc_container_target:
disk.bind(lxc_host_volume, lxc_container_target, instance_name)
@exception.wrap_exception()
def _detach_lxc_volume(self, xml, virt_dom, instance_name):
LOG.info(_('detaching LXC block device'))
lxc_container_root = self.get_lxc_container_root(virt_dom)
lxc_container_device = self.get_lxc_container_target(xml)
lxc_container_target = "%s/%s" % (lxc_container_root,
lxc_container_device)
if lxc_container_target:
disk.unbind(lxc_container_target)
@staticmethod
def get_lxc_container_root(virt_dom):
xml = virt_dom.XMLDesc(0)
doc = etree.fromstring(xml)
filesystem_block = doc.findall('./devices/filesystem')
for cnt, filesystem_nodes in enumerate(filesystem_block):
return filesystem_nodes[cnt].get('dir')
@staticmethod
def get_lxc_host_device(xml):
dom = minidom.parseString(xml)
for device in dom.getElementsByTagName('source'):
return device.getAttribute('dev')
@staticmethod
def get_lxc_container_target(xml):
dom = minidom.parseString(xml)
for device in dom.getElementsByTagName('target'):
filesystem = device.getAttribute('dev')
return 'dev/%s' % filesystem
@exception.wrap_exception()
def snapshot(self, context, instance, image_href):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._lookup_by_name(instance['name'])
except exception.InstanceNotFound:
raise exception.InstanceNotRunning()
(image_service, image_id) = glance.get_remote_image_service(
context, instance['image_ref'])
try:
base = image_service.show(context, image_id)
except exception.ImageNotFound:
base = {}
_image_service = glance.get_remote_image_service(context, image_href)
snapshot_image_service, snapshot_image_id = _image_service
snapshot = snapshot_image_service.show(context, snapshot_image_id)
metadata = {'is_public': False,
'status': 'active',
'name': snapshot['name'],
'properties': {
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance['project_id'],
'ramdisk_id': instance['ramdisk_id'],
}
}
if 'architecture' in base.get('properties', {}):
arch = base['properties']['architecture']
metadata['properties']['architecture'] = arch
source_format = base.get('disk_format') or 'raw'
if source_format == 'ami':
# NOTE(vish): assume amis are raw
source_format = 'raw'
image_format = FLAGS.snapshot_image_format or source_format
use_qcow2 = ((FLAGS.libvirt_images_type == 'default' and
FLAGS.use_cow_images) or
FLAGS.libvirt_images_type == 'qcow2')
if use_qcow2:
source_format = 'qcow2'
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = image_format
metadata['container_format'] = base.get('container_format', 'bare')
# Find the disk
xml_desc = virt_dom.XMLDesc(0)
domain = etree.fromstring(xml_desc)
source = domain.find('devices/disk/source')
disk_path = source.get('file')
snapshot_name = uuid.uuid4().hex
(state, _max_mem, _mem, _cpus, _t) = virt_dom.info()
state = LIBVIRT_POWER_STATE[state]
if state == power_state.RUNNING:
virt_dom.managedSave(0)
# Make the snapshot
libvirt_utils.create_snapshot(disk_path, snapshot_name)
# Export the snapshot to a raw image
snapshot_directory = FLAGS.libvirt_snapshots_directory
utils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
libvirt_utils.extract_snapshot(disk_path, source_format,
snapshot_name, out_path,
image_format)
finally:
libvirt_utils.delete_snapshot(disk_path, snapshot_name)
if state == power_state.RUNNING:
self._create_domain(domain=virt_dom)
# Upload that image to the image service
with libvirt_utils.file_open(out_path) as image_file:
image_service.update(context,
image_href,
metadata,
image_file)
@exception.wrap_exception()
def reboot(self, instance, network_info, reboot_type='SOFT',
block_device_info=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
if self._soft_reboot(instance):
LOG.info(_("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_("Failed to soft reboot instance."),
instance=instance)
return self._hard_reboot(instance, network_info,
block_device_info=block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
succeeds if the guest responds to acpi. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._lookup_by_name(instance["name"])
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
        # NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
for x in xrange(FLAGS.libvirt_wait_soft_reboot_seconds):
(state, _max_mem, _mem, _cpus, _t) = dom.info()
state = LIBVIRT_POWER_STATE[state]
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = utils.LoopingCall(self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, instance, network_info, xml=None,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
        This method actually destroys and re-creates the domain to ensure
        the reboot happens, as the guest OS cannot ignore this action.
If xml is set, it uses the passed in xml in place of the xml from the
existing domain.
"""
if not xml:
xml = self._get_domain_xml(instance)
self._destroy(instance)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance rebooted successfully."),
instance=instance)
raise utils.LoopingCallDone()
timer = utils.LoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
@exception.wrap_exception()
def pause(self, instance):
"""Pause VM instance"""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
@exception.wrap_exception()
def unpause(self, instance):
"""Unpause paused VM instance"""
dom = self._lookup_by_name(instance['name'])
dom.resume()
@exception.wrap_exception()
def power_off(self, instance):
"""Power off the specified instance"""
self._destroy(instance)
@exception.wrap_exception()
def power_on(self, instance):
"""Power on the specified instance"""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom)
timer = utils.LoopingCall(self._wait_for_running, instance)
timer.start(interval=0.5).wait()
@exception.wrap_exception()
def suspend(self, instance):
"""Suspend the specified instance"""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
@exception.wrap_exception()
def resume(self, instance):
"""resume the specified instance"""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom)
@exception.wrap_exception()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted"""
xml = self._get_domain_xml(instance)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
@exception.wrap_exception()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
unrescue_xml = self._get_domain_xml(instance)
unrescue_xml_path = os.path.join(FLAGS.instances_path,
instance['name'],
'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_images = {
'image_id': FLAGS.rescue_image_id or instance['image_ref'],
'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'],
'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'],
}
xml = self.to_xml(instance, network_info, image_meta,
rescue=rescue_images)
self._create_image(context, instance, xml, '.rescue', rescue_images,
network_info=network_info,
admin_pass=rescue_password)
self._destroy(instance)
self._create_domain(xml)
@exception.wrap_exception()
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
unrescue_xml_path = os.path.join(FLAGS.instances_path,
instance['name'],
'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._lookup_by_name(instance['name'])
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(FLAGS.instances_path, instance['name'],
"*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
@exception.wrap_exception()
def poll_rebooting_instances(self, timeout):
pass
@exception.wrap_exception()
def poll_rescued_instances(self, timeout):
pass
def _enable_hairpin(self, xml):
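        # Hairpin mode makes the bridge reflect frames back out the port they
        # arrived on, which lets an instance reach itself through its own
        # floating IP; it is enabled here for every interface of the guest.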
interfaces = self.get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
xml = self.to_xml(instance, network_info, image_meta,
block_device_info=block_device_info)
self._create_image(context, instance, xml, network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
LOG.debug(_("Instance is running"), instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance spawned successfully."),
instance=instance)
raise utils.LoopingCallDone()
timer = utils.LoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@exception.wrap_exception()
def get_console_output(self, instance):
virt_dom = self._lookup_by_name(instance['name'])
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
# If the guest has a console logging to a file prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
libvirt_utils.chown(path, os.getuid())
return libvirt_utils.load_file(path)
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance['name'])
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance['name'])
fpath = self._append_to_file(data, console_log)
return libvirt_utils.load_file(fpath)
@staticmethod
def get_host_ip_addr():
return FLAGS.my_ip
@exception.wrap_exception()
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO(sleepsonthefloor): use etree instead of minidom
dom = minidom.parseString(xml)
for graphic in dom.getElementsByTagName('graphics'):
if graphic.getAttribute('type') == 'vnc':
return graphic.getAttribute('port')
port = get_vnc_port_for_instance(instance['name'])
host = FLAGS.vncserver_proxyclient_address
return {'host': host, 'port': port, 'internal_access_path': None}
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
os.close(f)
LOG.debug(_("Path '%(path)s' supports direct I/O") %
{'path': dirpath})
except OSError, e:
if e.errno == errno.EINVAL:
LOG.debug(_("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
hasDirectIO = False
else:
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
raise e
except Exception, e:
LOG.error(_("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
raise e
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size"""
if not fs_format:
fs_format = FLAGS.default_ephemeral_format
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
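        # e.g. local_size=20 with unit='G' yields the size string '20G',
        # creating a blank 20 GiB raw image at 'target'.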
if fs_format:
libvirt_utils.mkfs(fs_format, target, label)
def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type):
self._create_local(target, ephemeral_size)
disk.mkfs(os_type, fs_label, target)
@staticmethod
def _create_swap(target, swap_mb):
"""Create a swap file of specified size"""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
libvirt_utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance_name):
return os.path.join(FLAGS.instances_path, instance_name,
'console.log')
def _chown_console_log_for_instance(self, instance_name):
console_log = self._get_console_log_path(instance_name)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _create_image(self, context, instance, libvirt_xml, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None, admin_pass=None):
if not suffix:
suffix = ''
# Are we using a config drive?
using_config_drive = False
if (instance.get('config_drive') or
FLAGS.force_config_drive):
LOG.info(_('Using config drive'), instance=instance)
using_config_drive = True
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(FLAGS.instances_path,
instance['name'],
fname + suffix)
def image(fname, image_type=FLAGS.libvirt_images_type):
return self.image_backend.image(instance['name'],
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
utils.ensure_tree(basepath(suffix=''))
LOG.info(_('Creating image'), instance=instance)
libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)
if FLAGS.libvirt_type == 'lxc':
container_dir = os.path.join(FLAGS.instances_path,
instance['name'],
'rootfs')
utils.ensure_tree(container_dir)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance['name'])
# NOTE(vish): No need add the suffix to console.log
libvirt_utils.write_to_file(basepath('console.log', ''), '', 007)
if not disk_images:
disk_images = {'image_id': instance['image_ref'],
'kernel_id': instance['kernel_id'],
'ramdisk_id': instance['ramdisk_id']}
if disk_images['kernel_id']:
fname = disk_images['kernel_id']
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
if disk_images['ramdisk_id']:
fname = disk_images['ramdisk_id']
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
root_fname = hashlib.sha1(str(disk_images['image_id'])).hexdigest()
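        # The cached base image is keyed on the SHA-1 of the image id, so
        # instances booted from the same image reuse a single cached file
        # (used as the qcow2 backing file when COW images are enabled).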
size = instance['root_gb'] * 1024 * 1024 * 1024
inst_type_id = instance['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
if size == 0 or suffix == '.rescue':
size = None
if not self._volume_in_mapping(self.default_root_device,
block_device_info):
image('disk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=root_fname,
size=size,
image_id=disk_images['image_id'],
user_id=instance['user_id'],
project_id=instance['project_id'])
ephemeral_gb = instance['ephemeral_gb']
if ephemeral_gb and not self._volume_in_mapping(
self.default_second_device, block_device_info):
swap_device = self.default_third_device
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance["os_type"])
fname = "ephemeral_%s_%s_%s" % ("0",
ephemeral_gb,
instance["os_type"])
size = ephemeral_gb * 1024 * 1024 * 1024
image('disk.local').cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
else:
swap_device = self.default_second_device
for eph in driver.block_device_info_get_ephemerals(block_device_info):
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % eph['num'],
os_type=instance["os_type"])
size = eph['size'] * 1024 * 1024 * 1024
fname = "ephemeral_%s_%s_%s" % (eph['num'],
eph['size'],
instance["os_type"])
image(_get_eph_disk(eph)).cache(fetch_func=fn,
filename=fname,
size=size,
ephemeral_size=eph['size'])
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not self._volume_in_mapping(swap_device, block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * 1024 * 1024
image('disk.swap').cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# target partition for file injection
target_partition = None
if not instance['kernel_id']:
target_partition = FLAGS.libvirt_inject_partition
if target_partition == 0:
target_partition = None
if FLAGS.libvirt_type == 'lxc':
target_partition = None
if FLAGS.libvirt_inject_key and instance['key_data']:
key = str(instance['key_data'])
else:
key = None
# File injection
metadata = instance.get('metadata')
if not FLAGS.libvirt_inject_password:
admin_pass = None
net = netutils.get_injected_network_template(network_info)
# Config drive
if using_config_drive:
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md)
cdb = configdrive.ConfigDriveBuilder(instance_md=inst_md)
try:
configdrive_path = basepath(fname='disk.config')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
cdb.make_drive(configdrive_path)
finally:
cdb.cleanup()
elif any((key, net, metadata, admin_pass, files)):
# If we're not using config_drive, inject into root fs
injection_path = image('disk').path
img_id = instance['image_ref']
for injection in ('metadata', 'key', 'net', 'admin_pass',
'files'):
if locals()[injection]:
LOG.info(_('Injecting %(injection)s into image'
' %(img_id)s'), locals(), instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
LOG.warn(_('Ignoring error injecting data into image '
'%(img_id)s (%(e)s)') % locals(),
instance=instance)
if FLAGS.libvirt_type == 'lxc':
disk.setup_container(basepath('disk'),
container_dir=container_dir,
use_cow=FLAGS.use_cow_images)
if FLAGS.libvirt_type == 'uml':
libvirt_utils.chown(basepath('disk'), 'root')
@staticmethod
def _volume_in_mapping(mount_device, block_device_info):
block_device_list = [block_device.strip_dev(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
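        # block_device.strip_dev() is assumed to drop the '/dev/' prefix, so
        # the list holds short names like 'vdb' that are compared against the
        # stripped mount_device below.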
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
block_device_list.append(
block_device.strip_dev(swap['device_name']))
block_device_list += [block_device.strip_dev(ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(
block_device_info)]
LOG.debug(_("block_device_list %s"), block_device_list)
return block_device.strip_dev(mount_device) in block_device_list
def get_host_capabilities(self):
"""Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host"""
xmlstr = self._conn.getCapabilities()
caps = config.LibvirtConfigCaps()
caps.parse_str(xmlstr)
return caps
def get_host_cpu_for_guest(self):
"""Returns an instance of config.LibvirtConfigGuestCPU
representing the host's CPU model & topology with
policy for configuring a guest to match"""
caps = self.get_host_capabilities()
hostcpu = caps.host.cpu
guestcpu = config.LibvirtConfigGuestCPU()
guestcpu.model = hostcpu.model
guestcpu.vendor = hostcpu.vendor
guestcpu.arch = hostcpu.arch
guestcpu.match = "exact"
        for hostfeat in hostcpu.features:
            guestfeat = config.LibvirtConfigGuestCPUFeature(hostfeat.name)
            guestfeat.policy = "require"
            guestcpu.add_feature(guestfeat)
return guestcpu
def get_guest_cpu_config(self):
mode = FLAGS.libvirt_cpu_mode
model = FLAGS.libvirt_cpu_model
if mode is None:
if FLAGS.libvirt_type == "kvm" or FLAGS.libvirt_type == "qemu":
mode = "host-model"
else:
mode = "none"
if mode == "none":
return None
if FLAGS.libvirt_type != "kvm" and FLAGS.libvirt_type != "qemu":
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % FLAGS.libvirt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen")
% {'mode': mode, 'model': (model or "")})
# TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
# updated to be at least this new, we can kill off the elif
# blocks here
if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
cpu = config.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
elif mode == "custom":
cpu = config.LibvirtConfigGuestCPU()
cpu.model = model
elif mode == "host-model":
cpu = self.get_host_cpu_for_guest()
elif mode == "host-passthrough":
msg = _("Passthrough of the host CPU was requested but "
"this libvirt version does not support this feature")
raise exception.NovaException(msg)
return cpu
def get_guest_storage_config(self, instance, image_meta,
rescue, block_device_info,
inst_type,
root_device_name, root_device):
devices = []
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
if FLAGS.libvirt_type == "lxc":
fs = config.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(FLAGS.instances_path,
instance['name'],
'rootfs')
devices.append(fs)
else:
if image_meta and image_meta.get('disk_format') == 'iso':
root_device_type = 'cdrom'
root_device = 'hda'
else:
root_device_type = 'disk'
if FLAGS.libvirt_type == "uml":
default_disk_bus = "uml"
elif FLAGS.libvirt_type == "xen":
default_disk_bus = "xen"
else:
default_disk_bus = "virtio"
def disk_info(name, disk_dev, disk_bus=default_disk_bus,
device_type="disk"):
image = self.image_backend.image(instance['name'],
name)
return image.libvirt_info(disk_bus,
disk_dev,
device_type,
self.disk_cachemode)
if rescue:
diskrescue = disk_info('disk.rescue',
self.default_root_device,
device_type=root_device_type)
devices.append(diskrescue)
diskos = disk_info('disk',
self.default_second_device)
devices.append(diskos)
else:
ebs_root = self._volume_in_mapping(self.default_root_device,
block_device_info)
if not ebs_root:
if root_device_type == "cdrom":
bus = "ide"
else:
bus = default_disk_bus
diskos = disk_info('disk',
root_device,
bus,
root_device_type)
devices.append(diskos)
ephemeral_device = None
if not (self._volume_in_mapping(self.default_second_device,
block_device_info) or
0 in [eph['num'] for eph in
driver.block_device_info_get_ephemerals(
block_device_info)]):
if instance['ephemeral_gb'] > 0:
ephemeral_device = self.default_second_device
if ephemeral_device is not None:
disklocal = disk_info('disk.local', ephemeral_device)
devices.append(disklocal)
if ephemeral_device is not None:
swap_device = self.default_third_device
db.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_ephemeral_device':
'/dev/' + self.default_second_device})
else:
swap_device = self.default_second_device
for eph in driver.block_device_info_get_ephemerals(
block_device_info):
diskeph = disk_info(_get_eph_disk(eph),
block_device.strip_dev(
eph['device_name']))
devices.append(diskeph)
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
diskswap = disk_info('disk.swap',
block_device.strip_dev(
swap['device_name']))
devices.append(diskswap)
elif (inst_type['swap'] > 0 and
not self._volume_in_mapping(swap_device,
block_device_info)):
diskswap = disk_info('disk.swap', swap_device)
devices.append(diskswap)
db.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'default_swap_device': '/dev/' + swap_device})
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
cfg = self.volume_driver_method('connect_volume',
connection_info,
mount_device)
devices.append(cfg)
if (instance.get('config_drive') or
instance.get('config_drive_id') or
FLAGS.force_config_drive):
diskconfig = config.LibvirtConfigGuestDisk()
diskconfig.source_type = "file"
diskconfig.driver_format = "raw"
diskconfig.driver_cache = self.disk_cachemode
diskconfig.source_path = os.path.join(FLAGS.instances_path,
instance['name'],
"disk.config")
diskconfig.target_dev = self.default_last_device
diskconfig.target_bus = default_disk_bus
devices.append(diskconfig)
return devices
def get_guest_config(self, instance, network_info, image_meta, rescue=None,
block_device_info=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
# FIXME(vish): stick this in db
inst_type_id = instance['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
guest = config.LibvirtConfigGuest()
guest.virt_type = FLAGS.libvirt_type
guest.name = instance['name']
guest.uuid = instance['uuid']
guest.memory = inst_type['memory_mb'] * 1024
guest.vcpus = inst_type['vcpus']
guest.cpu = self.get_guest_cpu_config()
root_device_name = driver.block_device_info_get_root(block_device_info)
if root_device_name:
root_device = block_device.strip_dev(root_device_name)
else:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
root_device = self.default_root_device
db.instance_update(
nova_context.get_admin_context(), instance['uuid'],
{'root_device_name': '/dev/' + self.default_root_device})
guest.os_type = vm_mode.get_from_instance(instance)
if guest.os_type is None:
if FLAGS.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
elif FLAGS.libvirt_type == "uml":
guest.os_type = vm_mode.UML
elif FLAGS.libvirt_type == "xen":
guest.os_type = vm_mode.XEN
else:
guest.os_type = vm_mode.HVM
if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.HVM:
guest.os_loader = '/usr/lib/xen/boot/hvmloader'
if FLAGS.libvirt_type == "lxc":
guest.os_type = vm_mode.EXE
guest.os_init_path = "/sbin/init"
guest.os_cmdline = "console=ttyS0"
elif FLAGS.libvirt_type == "uml":
guest.os_type = vm_mode.UML
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name or "/dev/ubda"
else:
if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name or "/dev/xvda"
else:
guest.os_type = vm_mode.HVM
if rescue:
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(FLAGS.instances_path,
instance['name'],
"kernel.rescue")
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(FLAGS.instances_path,
instance['name'],
"ramdisk.rescue")
elif instance['kernel_id']:
guest.os_kernel = os.path.join(FLAGS.instances_path,
instance['name'],
"kernel")
if FLAGS.libvirt_type == "xen":
guest.os_cmdline = "ro"
else:
guest.os_cmdline = "root=%s console=ttyS0" % (
root_device_name or "/dev/vda",)
if instance['ramdisk_id']:
guest.os_initrd = os.path.join(FLAGS.instances_path,
instance['name'],
"ramdisk")
else:
guest.os_boot_dev = "hd"
if FLAGS.libvirt_type != "lxc" and FLAGS.libvirt_type != "uml":
guest.acpi = True
clk = config.LibvirtConfigGuestClock()
clk.offset = "utc"
guest.set_clock(clk)
if FLAGS.libvirt_type == "kvm":
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = config.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = config.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
for cfg in self.get_guest_storage_config(instance,
image_meta,
rescue,
block_device_info,
inst_type,
root_device_name,
root_device):
guest.add_device(cfg)
for (network, mapping) in network_info:
cfg = self.vif_driver.plug(instance, (network, mapping))
guest.add_device(cfg)
if FLAGS.libvirt_type == "qemu" or FLAGS.libvirt_type == "kvm":
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
consolelog = config.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = os.path.join(FLAGS.instances_path,
instance['name'],
"console.log")
guest.add_device(consolelog)
consolepty = config.LibvirtConfigGuestSerial()
consolepty.type = "pty"
guest.add_device(consolepty)
else:
consolepty = config.LibvirtConfigGuestConsole()
consolepty.type = "pty"
guest.add_device(consolepty)
if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
if FLAGS.use_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = config.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
graphics = config.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = FLAGS.vnc_keymap
graphics.listen = FLAGS.vncserver_listen
guest.add_device(graphics)
return guest
def to_xml(self, instance, network_info, image_meta=None, rescue=None,
block_device_info=None):
LOG.debug(_('Starting toXML method'), instance=instance)
conf = self.get_guest_config(instance, network_info, image_meta,
rescue, block_device_info)
xml = conf.to_xml()
LOG.debug(_('Finished toXML method'), instance=instance)
return xml
def _lookup_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
return self._conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = _("Error from libvirt while looking up %(instance_name)s: "
"[Error Code %(error_code)s] %(ex)s") % locals()
raise exception.NovaException(msg)
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._lookup_by_name(instance['name'])
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': LIBVIRT_POWER_STATE[state],
'max_mem': max_mem,
'mem': mem,
'num_cpu': num_cpu,
'cpu_time': cpu_time}
def _create_domain(self, xml=None, domain=None, launch_flags=0):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
if xml:
domain = self._conn.defineXML(xml)
domain.createWithFlags(launch_flags)
self._enable_hairpin(domain.XMLDesc(0))
return domain
def _create_domain_and_network(self, xml, instance, network_info,
block_device_info=None):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('connect_volume',
connection_info,
mount_device)
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
domain = self._create_domain(xml)
self.firewall_driver.apply_instance_filter(instance, network_info)
return domain
def get_all_block_devices(self):
"""
Return all block devices in use on this node.
"""
devices = []
for dom_id in self.list_instance_ids():
domain = self._conn.lookupByID(dom_id)
try:
doc = etree.fromstring(domain.XMLDesc(0))
except Exception:
continue
ret = doc.findall('./devices/disk')
for node in ret:
if node.get('type') != 'block':
continue
for child in node.getchildren():
if child.tag == 'source':
devices.append(child.get('dev'))
return devices
def get_disks(self, instance_name):
"""
Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
domain = self._lookup_by_name(instance_name)
xml = domain.XMLDesc(0)
try:
doc = etree.fromstring(xml)
except Exception:
return []
return filter(bool,
[target.get("dev")
for target in doc.findall('devices/disk/target')])
def get_interfaces(self, xml):
"""
        Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
ret = doc.findall('./devices/interface')
for node in ret:
devdst = None
for child in list(node):
if child.tag == 'target':
devdst = child.attrib['dev']
if devdst is None:
continue
interfaces.append(devdst)
return interfaces
@staticmethod
def get_vcpu_total():
"""Get vcpu number of physical computer.
        :returns: the number of cpu cores.
"""
# On certain platforms, this will raise a NotImplementedError.
try:
return multiprocessing.cpu_count()
except NotImplementedError:
LOG.warn(_("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "
"This error can be safely ignored for now."))
return 0
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def get_local_gb_total():
"""Get the total hdd size(GB) of physical computer.
:returns:
The total amount of HDD(GB).
Note that this value shows a partition where
NOVA-INST-DIR/instances mounts.
"""
stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
return stats['total'] / (1024 ** 3)
def get_vcpu_used(self):
""" Get vcpu usage number of physical computer.
        :returns: The total number of vcpus currently in use.
"""
total = 0
for dom_id in self.list_instance_ids():
dom = self._conn.lookupByID(dom_id)
vcpus = dom.vcpus()
if vcpus is None:
# dom.vcpus is not implemented for lxc, but returning 0 for
# a used count is hardly useful for something measuring usage
total += 1
else:
total += len(vcpus[1])
            # NOTE(gtt116): yield so other greenthreads get a chance to run.
greenthread.sleep(0)
return total
    def get_memory_mb_used(self):
        """Get the used memory size(MB) of the physical computer.
        :returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
m = open('/proc/meminfo').read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
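        # /proc/meminfo is split on whitespace, so a line such as
        # "MemFree:  123456 kB" yields the kB value at index('MemFree:') + 1.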
if FLAGS.libvirt_type == 'xen':
used = 0
for domain_id in self.list_instance_ids():
# skip dom0
dom_mem = int(self._conn.lookupByID(domain_id).info()[2])
if domain_id != 0:
used += dom_mem
else:
                    # the memory reported by dom0 includes memory that is
                    # actually free, buffered or cached on the host, so
                    # subtract that from dom0's usage
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / 1024
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self.get_memory_mb_total() - avail / 1024
    def get_local_gb_used(self):
        """Get the used hdd size(GB) of the physical computer.
:returns:
The total usage of HDD(GB).
Note that this value shows a partition where
NOVA-INST-DIR/instances mounts.
"""
stats = libvirt_utils.get_fs_info(FLAGS.instances_path)
return stats['used'] / (1024 ** 3)
def get_hypervisor_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self._conn.getType()
def get_hypervisor_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
# NOTE(justinsb): getVersion moved between libvirt versions
# Trying to do be compatible with older versions is a lost cause
# But ... we can at least give the user a nice message
method = getattr(self._conn, 'getVersion', None)
if method is None:
raise exception.NovaException(_("libvirt version is too old"
" (does not support getVersion)"))
# NOTE(justinsb): If we wanted to get the version, we could:
# method = getattr(libvirt, 'getVersion', None)
# NOTE(justinsb): This would then rely on a proper version check
return method()
def get_hypervisor_hostname(self):
"""Returns the hostname of the hypervisor."""
return self._conn.getHostname()
def get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self.get_host_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (g.arch, dt, g.ostype)
instance_caps.append(instance_cap)
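        # For example, a kvm host typically yields tuples such as
        # ('x86_64', 'kvm', 'hvm'); the actual values depend on the host
        # capabilities XML.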
return instance_caps
def get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities,
and returns as a json string.
:return: see above description
"""
caps = self.get_host_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = list()
for f in caps.host.cpu.features:
features.append(f.name)
cpu_info['features'] = features
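        # The resulting JSON has roughly this shape (values are host
        # dependent), e.g.:
        #   {"arch": "x86_64", "model": "SandyBridge", "vendor": "Intel",
        #    "topology": {"sockets": 1, "cores": 4, "threads": 2},
        #    "features": ["vmx", "aes"]}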
        # TODO(berrange): why do we bother converting the
        # libvirt capabilities XML into a special JSON format?
        # The data format is different across all the drivers
        # so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
return jsonutils.dumps(cpu_info)
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
"""
Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
#TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self):
"""Retrieve resource info.
This method is called as a periodic task and is used only
in live migration currently.
:returns: dictionary containing resource info
"""
dic = {'vcpus': self.get_vcpu_total(),
'memory_mb': self.get_memory_mb_total(),
'local_gb': self.get_local_gb_total(),
'vcpus_used': self.get_vcpu_used(),
'memory_mb_used': self.get_memory_mb_used(),
'local_gb_used': self.get_local_gb_used(),
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
'hypervisor_hostname': self.get_hypervisor_hostname(),
'cpu_info': self.get_cpu_info(),
'disk_available_least': self.get_disk_available_least()}
return dic
def check_can_live_migrate_destination(self, ctxt, instance_ref,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
disk_available_mb = None
if block_migration:
disk_available_gb = self._get_compute_info(ctxt,
FLAGS.host)['disk_available_least']
disk_available_mb = \
(disk_available_gb * 1024) - FLAGS.reserved_host_disk_mb
# Compare CPU
src = instance_ref['host']
source_cpu_info = self._get_compute_info(ctxt, src)['cpu_info']
self._compare_cpu(source_cpu_info)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = FLAGS.host
filename = dest_check_data["filename"]
block_migration = dest_check_data["block_migration"]
shared = self._check_shared_storage_test_file(filename)
if block_migration:
if shared:
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(ctxt, instance_ref,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'])
elif not shared:
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
def _get_compute_info(self, context, host):
"""Get compute host's information specified by key"""
compute_node_ref = db.service_get_all_compute_by_host(context, host)
return compute_node_ref[0]['compute_node'][0]
def _assert_dest_node_has_enough_disk(self, context, instance_ref,
available_mb, disk_over_commit):
"""Checks if destination has enough disk for block migration."""
        # Libvirt supports the qcow2 disk format, which is usually compressed
        # on compute nodes.
        # The real (compressed) disk image may grow up to the "virtual disk
        # size" specified as the maximum disk size.
        # (See qemu-img info path-to-disk)
        # The scheduler considers the destination host to have enough disk
        # space if real disk size < available disk size when disk_over_commit
        # is True, otherwise if virtual disk size < available disk size.
available = available_mb * (1024 ** 2)
ret = self.get_instance_disk_info(instance_ref['name'])
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
instance_uuid = instance_ref['uuid']
reason = _("Unable to migrate %(instance_uuid)s: "
"Disk of instance is too large(available"
" on destination host:%(available)s "
"< need:%(necessary)s)")
raise exception.MigrationError(reason=reason % locals())
    def _compare_cpu(self, cpu_info):
        """Checks that the host cpu is compatible with the given cpu info.
        The comparison is done with libvirt's compareCPU(); return values
        follow virCPUCompareResult (a value > 0 means the CPUs are
        compatible and live migration can proceed):
        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
        :param cpu_info: json string describing cpu features
                         (see get_cpu_info())
        :returns: None. Raises an exception if the given cpu info is not
                  compatible with this server.
        """
info = jsonutils.loads(cpu_info)
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
cpu = config.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(config.LibvirtConfigCPUFeature(f))
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# unknown character exists in xml, then libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError, e:
ret = e.message
LOG.error(m % locals())
raise
if ret <= 0:
            LOG.error(m % locals())
raise exception.InvalidCPUInfo(reason=m % locals())
def _create_shared_storage_test_file(self):
"""Makes tmpfile under FLAGS.instance_path."""
dirpath = FLAGS.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug(_("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.") % tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
    def _check_shared_storage_test_file(self, filename):
        """Confirms existence of the tmpfile under FLAGS.instances_path.
        Returns False if the tmpfile cannot be confirmed."""
        tmp_file = os.path.join(FLAGS.instances_path, filename)
        return os.path.exists(tmp_file)
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under FLAGS.instances_path."""
tmp_file = os.path.join(FLAGS.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
time_module=None):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
if not time_module:
time_module = greenthread
self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
self.firewall_driver.prepare_instance_filter(instance_ref,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(FLAGS.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance_ref,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance_ref["name"])
time_module.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
        :params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, do block migration.
:params migrate_data: implementation specific params
"""
greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
post_method, recover_method, block_migration)
def _live_migration(self, ctxt, instance_ref, dest, post_method,
recover_method, block_migration=False):
"""Do live migration.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
"""
# Do live migration.
try:
if block_migration:
flaglist = FLAGS.block_migration_flag.split(',')
else:
flaglist = FLAGS.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
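            # e.g. live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,
            # VIR_MIGRATE_PEER2PEER" resolves each name on the libvirt module
            # and ORs them into a single bitmask for migrateToURI() below.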
dom = self._lookup_by_name(instance_ref["name"])
dom.migrateToURI(FLAGS.live_migration_uri % dest,
logical_sum,
None,
FLAGS.live_migration_bandwidth)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_("Live Migration failure: %(e)s") % locals(),
instance=instance_ref)
recover_method(ctxt, instance_ref, dest, block_migration)
# Waiting for completion of live_migration.
timer = utils.LoopingCall(f=None)
def wait_for_live_migration():
"""waiting for live migration completion"""
try:
self.get_info(instance_ref)['state']
except exception.NotFound:
timer.stop()
post_method(ctxt, instance_ref, dest, block_migration)
timer.f = wait_for_live_migration
timer.start(interval=0.5).wait()
    def pre_live_migration(self, context, instance_ref, block_device_info,
                           network_info):
        """Prepare for live migration."""
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('connect_volume',
connection_info,
mount_device)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # A retry is necessary because requests arrive continuously; when
        # concurrent requests hit iptables, it complains.
max_retry = FLAGS.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance_ref, network_info)
break
except exception.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_("plug_vifs() failed %(cnt)d."
"Retry up to %(max_retry)d for %(hostname)s.")
% locals())
greenthread.sleep(1)
def pre_block_migration(self, ctxt, instance, disk_info_json):
"""Preparation block migration.
:params ctxt: security context
:params instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params disk_info_json:
json strings specified in get_instance_disk_info
"""
disk_info = jsonutils.loads(disk_info_json)
# make instance directory
instance_dir = os.path.join(FLAGS.instances_path, instance['name'])
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file']:
libvirt_utils.create_image(info['type'], instance_disk,
info['disk_size'])
else:
                # Creating the backing file follows the same approach as spawning instances.
cache_name = os.path.basename(info['backing_file'])
# Remove any size tags which the cache manages
cache_name = cache_name.split('_')[0]
image = self.image_backend.image(instance['name'],
instance_disk,
FLAGS.libvirt_images_type)
image.cache(fetch_func=libvirt_utils.fetch_image,
context=ctxt,
filename=cache_name,
image_id=instance['image_ref'],
user_id=instance['user_id'],
project_id=instance['project_id'],
size=info['virt_disk_size'])
        # If the image has a kernel and ramdisk, just download them
        # in the normal way.
if instance['kernel_id']:
libvirt_utils.fetch_image(ctxt,
os.path.join(instance_dir, 'kernel'),
instance['kernel_id'],
instance['user_id'],
instance['project_id'])
if instance['ramdisk_id']:
libvirt_utils.fetch_image(ctxt,
os.path.join(instance_dir,
'ramdisk'),
instance['ramdisk_id'],
instance['user_id'],
instance['project_id'])
def post_live_migration_at_destination(self, ctxt,
instance_ref,
network_info,
block_migration):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
        :param network_info: instance network information
        :param block_migration: if true, do the post operation of block migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance_ref["name"] not in dom_list:
instance_dir = os.path.join(FLAGS.instances_path,
instance_ref["name"])
xml_path = os.path.join(instance_dir, 'libvirt.xml')
# In case of block migration, destination does not have
# libvirt.xml
if not os.path.isfile(xml_path):
xml = self.to_xml(instance_ref, network_info=network_info)
f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
f.write(xml)
f.close()
            # libvirt.xml should be made by to_xml(), but libvirt
            # does not accept the to_xml() result, since the uuid is
            # not included in it.
dom = self._lookup_by_name(instance_ref["name"])
self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, instance_name):
"""Preparation block migration.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:return:
json strings with below format::
"[{'path':'disk', 'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'},...]"
"""
disk_info = []
virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file')
if disk_type != 'file':
LOG.debug(_('skipping %(path)s since it looks like volume') %
locals())
continue
# get the real disk size or
# raise a localized error if image is unavailable
dk_size = int(os.path.getsize(path))
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
else:
backing_file = ""
virt_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size})
return jsonutils.dumps(disk_info)
def get_disk_available_least(self):
"""Return disk available least size.
The size of available disk, when block_migration command given
disk_over_commit param is FALSE.
The size that deducted real nstance disk size from the total size
of the virtual disk of all instances.
"""
# available size of the disk
dk_sz_gb = self.get_local_gb_total() - self.get_local_gb_used()
        # Disk size that all instances use: virtual_size - disk_size
instances_name = self.list_instances()
instances_sz = 0
for i_name in instances_name:
try:
disk_infos = jsonutils.loads(
self.get_instance_disk_info(i_name))
for info in disk_infos:
i_vt_sz = int(info['virt_disk_size'])
i_dk_sz = int(info['disk_size'])
instances_sz += i_vt_sz - i_dk_sz
except OSError as e:
if e.errno == errno.ENOENT:
LOG.error(_("Getting disk size of %(i_name)s: %(e)s") %
locals())
else:
raise
except exception.InstanceNotFound:
# Instance was deleted during the check so ignore it
pass
            # NOTE(gtt116): give other tasks a chance to run.
greenthread.sleep(0)
# Disk available least size
available_least_size = dk_sz_gb * (1024 ** 3) - instances_sz
return (available_least_size / 1024 / 1024 / 1024)
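    # Hypothetical numbers for the calculation above: with 100 GB of local
    # disk in total and 20 GB used, dk_sz_gb is 80; one instance whose
    # virtual disk size is 40 GB but which currently occupies only 10 GB on
    # disk contributes the equivalent of 30 GB (tracked in bytes) to
    # instances_sz, so the method returns about 80 - 30 = 50 (GB).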
def unfilter_instance(self, instance_ref, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def update_host_status(self):
"""Retrieve status info from libvirt.
Query libvirt to get the state of the compute node, such
as memory and disk usage.
"""
return self.host_state.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first."""
return self.host_state.get_host_stats(refresh=refresh)
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
#NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context):
"""Manage the local cache of images."""
self.image_cache_manager.verify_base_images(context)
@exception.wrap_exception()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
LOG.debug(_("Starting migrate_disk_and_power_off"),
instance=instance)
disk_info_text = self.get_instance_disk_info(instance['name'])
disk_info = jsonutils.loads(disk_info_text)
self.power_off(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self.volume_driver_method('disconnect_volume',
connection_info,
mount_device)
# copy disks to destination
        # rename the instance dir to <name>_resize first, so that shared
        # storage for the instance dir (e.g. NFS) can be used.
same_host = (dest == self.get_host_ip_addr())
inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
inst_base_resize = inst_base + "_resize"
try:
utils.execute('mv', inst_base, inst_base_resize)
if same_host:
dest = None
utils.execute('mkdir', '-p', inst_base)
else:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if same_host:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
        except Exception as e:
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
raise e
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance)['state']
if state == power_state.RUNNING:
LOG.info(_("Instance running successfully."), instance=instance)
raise utils.LoopingCallDone()
@exception.wrap_exception()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
LOG.debug(_("Starting finish_migration"), instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance['root_gb']
elif fname == 'disk.local':
size = instance['ephemeral_gb']
else:
size = 0
size *= 1024 * 1024 * 1024
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
fmt = info['type']
if (size and fmt == 'qcow2' and
disk.can_resize_fs(info['path'], size, use_cow=True)):
path_raw = info['path'] + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', info['path'], path_raw)
utils.execute('mv', path_raw, info['path'])
fmt = 'raw'
if size:
disk.extend(info['path'], size)
if fmt == 'raw' and FLAGS.use_cow_images:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
path_qcow = info['path'] + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', info['path'], path_qcow)
utils.execute('mv', path_qcow, info['path'])
xml = self.to_xml(instance, network_info,
block_device_info=block_device_info)
        # assume _create_image does nothing if a target file exists.
# TODO(oda): injecting files is not necessary
self._create_image(context, instance, xml,
network_info=network_info,
block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
timer = utils.LoopingCall(self._wait_for_running, instance)
timer.start(interval=0.5).wait()
@exception.wrap_exception()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
LOG.debug(_("Starting finish_revert_migration"),
instance=instance)
inst_base = "%s/%s" % (FLAGS.instances_path, instance['name'])
inst_base_resize = inst_base + "_resize"
utils.execute('mv', inst_base_resize, inst_base)
xml = self.to_xml(instance, network_info,
block_device_info=block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
timer = utils.LoopingCall(self._wait_for_running, instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM"""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
def get_io_devices(xml_doc):
""" get the list of io devices from the
xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
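        # For example, a domain whose XML defines one disk with
        # <target dev='vda'/> and one interface with <target dev='vnet0'/>
        # yields {"volumes": ["vda"], "ifaces": ["vnet0"]} (illustrative names).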
domain = self._lookup_by_name(instance['name'])
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = get_io_devices(xml)
for disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(disk)
output[disk + "_read_req"] = stats[0]
output[disk + "_read"] = stats[1]
output[disk + "_write_req"] = stats[2]
output[disk + "_write"] = stats[3]
output[disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except libvirt.libvirtError:
pass
return output
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
pass
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
pass
def undo_aggregate_operation(self, context, op, aggregate_id,
host, set_error=True):
"""only used for Resource Pools"""
pass
class HostState(object):
"""Manages information about the compute node through libvirt"""
def __init__(self, read_only):
super(HostState, self).__init__()
self.read_only = read_only
self._stats = {}
self.connection = None
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host.
        If 'refresh' is True, update the stats first."""
if refresh:
self.update_status()
return self._stats
def update_status(self):
"""Retrieve status info from libvirt."""
LOG.debug(_("Updating host stats"))
if self.connection is None:
self.connection = LibvirtDriver(self.read_only)
data = {}
data["vcpus"] = self.connection.get_vcpu_total()
data["vcpus_used"] = self.connection.get_vcpu_used()
data["cpu_info"] = jsonutils.loads(self.connection.get_cpu_info())
data["disk_total"] = self.connection.get_local_gb_total()
data["disk_used"] = self.connection.get_local_gb_used()
data["disk_available"] = data["disk_total"] - data["disk_used"]
data["host_memory_total"] = self.connection.get_memory_mb_total()
data["host_memory_free"] = (data["host_memory_total"] -
self.connection.get_memory_mb_used())
data["hypervisor_type"] = self.connection.get_hypervisor_type()
data["hypervisor_version"] = self.connection.get_hypervisor_version()
data["hypervisor_hostname"] = self.connection.get_hypervisor_hostname()
data["supported_instances"] = \
self.connection.get_instance_capabilities()
self._stats = data
return data
| apache-2.0 | 681,890,633,713,154,700 | 39.935304 | 79 | 0.542921 | false |
dtcooper/python-fitparse | tests/test_utils.py | 1 | 2452 | #!/usr/bin/env python
import io
import os
import sys
import tempfile
try:
# Python 3.4+
from pathlib import Path
except ImportError:
Path = None
from fitparse.utils import fileish_open, is_iterable
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
def testfile(filename):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'files', filename)
class UtilsTestCase(unittest.TestCase):
def test_fileish_open_read(self):
"""Test the constructor does the right thing when given different types
(specifically, test files with 8 characters, followed by an uppercase.FIT
extension), which confused the fileish check on Python 2, see
https://github.com/dtcooper/python-fitparse/issues/29#issuecomment-312436350
for details"""
def test_fopen(fileish):
with fileish_open(fileish, 'rb') as f:
self.assertIsNotNone(f.read(1))
f.seek(0, os.SEEK_SET)
test_fopen(testfile('nametest.FIT'))
with open(testfile("nametest.FIT"), 'rb') as f:
test_fopen(f)
with open(testfile("nametest.FIT"), 'rb') as f:
test_fopen(f.read())
with open(testfile("nametest.FIT"), 'rb') as f:
test_fopen(io.BytesIO(f.read()))
if Path:
test_fopen(Path(testfile('nametest.FIT')))
def test_fileish_open_write(self):
def test_fopen(fileish):
with fileish_open(fileish, 'wb') as f:
f.write(b'\x12')
f.seek(0, os.SEEK_SET)
tmpfile = tempfile.NamedTemporaryFile(prefix='fitparse-test', suffix='.FIT', delete=False)
filename = tmpfile.name
tmpfile.close()
try:
test_fopen(filename)
with open(filename, 'wb') as f:
test_fopen(f)
test_fopen(io.BytesIO())
finally:
# remove silently
try:
os.remove(filename)
except OSError:
pass
def test_is_iterable(self):
self.assertFalse(is_iterable(None))
self.assertFalse(is_iterable(1))
self.assertFalse(is_iterable('1'))
self.assertFalse(is_iterable(b'1'))
self.assertTrue(is_iterable((1, 2)))
self.assertTrue(is_iterable([1, 2]))
self.assertTrue(is_iterable(range(2)))
if __name__ == '__main__':
unittest.main()
| mit | 4,138,587,470,542,077,000 | 28.190476 | 98 | 0.591762 | false |
stvstnfrd/edx-platform | openedx/core/djangoapps/discussions/tests/test_models.py | 1 | 9897 | """
Perform basic validation of the models
"""
from unittest.mock import patch
import pytest
from django.test import TestCase
from opaque_keys.edx.keys import CourseKey
from organizations.models import Organization
from ..models import DiscussionsConfiguration
from ..models import ProviderFilter
SUPPORTED_PROVIDERS = [
'cs_comments_service',
'lti',
'test',
]
class OrganizationFilterTest(TestCase):
"""
Perform basic validation on the filter model
"""
def setUp(self):
"""
Configure shared test data
"""
super().setUp()
self.course_key = CourseKey.from_string("course-v1:Test+Course+Configured")
self.course_key_with_defaults = CourseKey.from_string("course-v1:TestX+Course+Configured")
self.organization = Organization(short_name=self.course_key.org)
self.organization.save()
self.provider_allowed = SUPPORTED_PROVIDERS[0]
self.provider_denied = SUPPORTED_PROVIDERS[1]
@patch('openedx.core.djangoapps.discussions.models.get_supported_providers', return_value=SUPPORTED_PROVIDERS)
def test_get_nonexistent(self, _default_providers):
"""
        Assert we retrieve defaults when no configuration is set
"""
providers = ProviderFilter.get_available_providers(self.course_key_with_defaults)
assert len(providers) == len(SUPPORTED_PROVIDERS)
@patch('openedx.core.djangoapps.discussions.models.get_supported_providers', return_value=SUPPORTED_PROVIDERS)
def test_get_allow(self, _default_providers):
"""
Assert we can set the allow list
"""
ProviderFilter.objects.create(
org=self.course_key.org,
allow=[self.provider_allowed],
)
providers = ProviderFilter.get_available_providers(self.course_key)
assert self.provider_allowed in providers
assert len(providers) == 1
@patch('openedx.core.djangoapps.discussions.models.get_supported_providers', return_value=SUPPORTED_PROVIDERS)
def test_get_deny(self, _default_providers):
"""
Assert we can set the deny list
"""
ProviderFilter.objects.create(
org=self.course_key.org,
deny=[self.provider_denied],
)
providers = ProviderFilter.get_available_providers(self.course_key)
assert self.provider_denied not in providers
@patch('openedx.core.djangoapps.discussions.models.get_supported_providers', return_value=SUPPORTED_PROVIDERS)
def test_get_allow_and_deny(self, _default_providers):
"""
Assert we can add an item to both allow and deny lists
"""
ProviderFilter.objects.create(
org=self.course_key.org,
allow=[self.provider_allowed, self.provider_denied],
deny=[self.provider_denied],
)
providers = ProviderFilter.get_available_providers(self.course_key)
assert len(providers) == 1
assert self.provider_denied not in providers
assert self.provider_allowed in providers
@patch('openedx.core.djangoapps.discussions.models.get_supported_providers', return_value=SUPPORTED_PROVIDERS)
def test_get_allow_or_deny(self, _default_providers):
"""
        Assert we can add separate items to the allow and deny lists
"""
ProviderFilter.objects.create(
org=self.course_key.org,
allow=[self.provider_allowed],
deny=[self.provider_denied],
)
providers = ProviderFilter.get_available_providers(self.course_key)
assert len(providers) == 1
assert self.provider_denied not in providers
assert self.provider_allowed in providers
@patch('openedx.core.djangoapps.discussions.models.get_supported_providers', return_value=SUPPORTED_PROVIDERS)
def test_override(self, _default_providers):
"""
Assert we can override a configuration and get the latest data
"""
ProviderFilter.objects.create(
org=self.course_key.org,
allow=[self.provider_allowed, self.provider_denied],
)
ProviderFilter.objects.create(
org=self.course_key.org,
allow=[self.provider_allowed],
)
providers = ProviderFilter.get_available_providers(self.course_key)
assert self.provider_allowed in providers
assert len(providers) == 1
class DiscussionsConfigurationModelTest(TestCase):
"""
Perform basic validation on the configuration model
"""
def setUp(self):
"""
Configure shared test data (configuration, course_key, etc.)
"""
super().setUp()
self.course_key_with_defaults = CourseKey.from_string("course-v1:TestX+Course+Configured")
self.course_key_without_config = CourseKey.from_string("course-v1:TestX+Course+NoConfig")
self.course_key_with_values = CourseKey.from_string("course-v1:TestX+Course+Values")
self.configuration_with_defaults = DiscussionsConfiguration(
context_key=self.course_key_with_defaults,
)
self.configuration_with_defaults.save()
self.configuration_with_values = DiscussionsConfiguration(
context_key=self.course_key_with_values,
enabled=False,
provider_type='cs_comments_service',
plugin_configuration={
'url': 'http://localhost',
},
)
self.configuration_with_values.save()
def test_get_nonexistent(self):
"""
Assert we can not fetch a non-existent record
"""
with pytest.raises(DiscussionsConfiguration.DoesNotExist):
DiscussionsConfiguration.objects.get(
context_key=self.course_key_without_config,
)
def test_get_with_defaults(self):
"""
Assert we can lookup a record with default values
"""
configuration = DiscussionsConfiguration.objects.get(context_key=self.course_key_with_defaults)
assert configuration is not None
assert configuration.enabled # by default
assert configuration.lti_configuration is None
assert len(configuration.plugin_configuration.keys()) == 0
assert not configuration.provider_type
def test_get_with_values(self):
"""
Assert we can lookup a record with custom values
"""
configuration = DiscussionsConfiguration.objects.get(context_key=self.course_key_with_values)
assert configuration is not None
assert not configuration.enabled
assert configuration.lti_configuration is None
actual_url = configuration.plugin_configuration.get('url')
expected_url = self.configuration_with_values.plugin_configuration.get('url') # pylint: disable=no-member
assert actual_url == expected_url
assert configuration.provider_type == self.configuration_with_values.provider_type
def test_update_defaults(self):
"""
Assert we can update an existing record
"""
configuration = DiscussionsConfiguration.objects.get(context_key=self.course_key_with_defaults)
configuration.enabled = False
configuration.plugin_configuration = {
'url': 'http://localhost',
}
configuration.provider_type = 'cs_comments_service'
configuration.save()
configuration = DiscussionsConfiguration.objects.get(context_key=self.course_key_with_defaults)
assert configuration is not None
assert not configuration.enabled
assert configuration.lti_configuration is None
assert configuration.plugin_configuration['url'] == 'http://localhost'
assert configuration.provider_type == 'cs_comments_service'
def test_is_enabled_nonexistent(self):
"""
Assert that discussions are disabled, when no configuration exists
"""
is_enabled = DiscussionsConfiguration.is_enabled(self.course_key_without_config)
assert not is_enabled
def test_is_enabled_default(self):
"""
Assert that discussions are enabled by default, when a configuration exists
"""
is_enabled = DiscussionsConfiguration.is_enabled(self.course_key_with_defaults)
assert is_enabled
def test_is_enabled_explicit(self):
"""
        Assert that discussions can be explicitly disabled
"""
is_enabled = DiscussionsConfiguration.is_enabled(self.course_key_with_values)
assert not is_enabled
def test_get_nonexistent_empty(self):
"""
Assert we get an "empty" model back for nonexistent records
"""
configuration = DiscussionsConfiguration.get(self.course_key_without_config)
assert configuration is not None
assert not configuration.enabled
assert not configuration.lti_configuration
assert not configuration.plugin_configuration
assert not configuration.provider_type
def test_get_defaults(self):
"""
Assert we can lookup a record with default values
"""
configuration = DiscussionsConfiguration.get(self.course_key_with_defaults)
assert configuration is not None
assert configuration.enabled
assert not configuration.lti_configuration
assert not configuration.plugin_configuration
assert not configuration.provider_type
def test_get_explicit(self):
"""
Assert we can lookup a record with explicitly-set values
"""
configuration = DiscussionsConfiguration.get(self.course_key_with_values)
assert configuration is not None
assert not configuration.enabled
assert not configuration.lti_configuration
assert configuration.plugin_configuration
assert configuration.provider_type == 'cs_comments_service'
| agpl-3.0 | -4,353,630,920,590,664,000 | 38.588 | 114 | 0.666161 | false |
zjj/trac_hack | sample-plugins/workflow/StatusFixer.py | 1 | 2533 | from genshi.builder import tag
from trac.core import Component, implements
from trac.ticket.api import ITicketActionController, TicketSystem
from trac.perm import IPermissionRequestor
revision = "$Rev: 6326 $"
url = "$URL: https://svn.edgewall.org/repos/trac/tags/trac-0.12.2/sample-plugins/workflow/StatusFixer.py $"
class StatusFixerActionController(Component):
"""Provides the admin with a way to correct a ticket's status.
    This plugin is especially useful when you have made changes to your
    workflow and some ticket statuses are no longer valid. The tickets that
    are in those statuses can then be set to some valid state.
Don't forget to add `StatusFixerActionController` to the workflow
option in [ticket].
If there is no workflow option, the line will look like this:
workflow = ConfigurableTicketWorkflow,StatusFixerActionController
"""
implements(ITicketActionController, IPermissionRequestor)
# IPermissionRequestor methods
def get_permission_actions(self):
return ['TICKET_STATUSFIX']
# ITicketActionController methods
def get_ticket_actions(self, req, ticket):
actions = []
if 'TICKET_STATUSFIX' in req.perm(ticket.resource):
actions.append((0, 'force_status'))
return actions
def get_all_status(self):
"""We return all the status that are used in the database so that the
user can query for used, but invalid, status."""
db = self.env.get_db_cnx()
cursor = db.cursor()
cursor.execute('SELECT DISTINCT status FROM ticket')
all_status = [row[0] for row in cursor]
cursor.close()
return all_status
def render_ticket_action_control(self, req, ticket, action):
# Need to use the list of all status so you can't manually set
# something to an invalid state.
selected_value = req.args.get('force_status_value', 'new')
all_status = TicketSystem(self.env).get_all_status()
render_control = tag.select(
[tag.option(x, selected=(x == selected_value and 'selected' or
None)) for x in all_status],
id='force_status_value', name='force_status_value')
return ("force status to:", render_control,
"The next status will be the selected one")
def get_ticket_changes(self, req, ticket, action):
return {'status': req.args.get('force_status_value')}
def apply_action_side_effects(self, req, ticket, action):
pass
| bsd-3-clause | -8,738,372,976,217,055,000 | 37.969231 | 107 | 0.669562 | false |
triump0870/Interactive_Programming_Python | Stopwatch/user23_3ZEr6XFSuHrqRSj.py | 1 | 1935 | # Stopwatch: The Game
# Rohan Roy - 29th Oct 2013
# http://www.codeskulptor.org/#user23_3ZEr6XFSuHrqRSj.py
import simplegui
# define global variables
interval = 100
tens_sec = 0
message = "0:00.0"
score = 0
Is_timer = False
attempt = 0
# define helper function format that converts integer in A:BC.D format
def format(t):
global message
minutes = (t-(t%600))/600
t = t - minutes * 600
secs = (t-t%10)/10
tenth = t - secs * 10
message = '%d:%02d.%d' % (minutes,secs,tenth)
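# For example, format(754), i.e. 754 tenths of a second, sets the global
# "message" to "1:15.4" (1 minute, 15 seconds and 4 tenths).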
# define event handlers for buttons; "Start", "Stop", "Reset"
# Button Start
def start_btn_handler():
global Is_timer
timer.start()
Is_timer = True
# Button Stop
def stop_btn_handler():
global message
global score
global attempt
global Is_timer
timer.stop()
if Is_timer :
attempt = attempt + 1
if int(message[5]) == 0:
score = score + 1
Is_timer = False
# Button Reset
def reset_btn_handler():
timer.stop()
global tens_sec
global score
global attempt
global Is_timer
score = 0
attempt = 0
tens_sec = 0
Is_timer = False
format(tens_sec)
# define event handler for timer with 0.1 sec interval
def tick():
global tens_sec
tens_sec = tens_sec+1
format(tens_sec)
# define handler to draw on canvas
def draw(canvas):
canvas.draw_text(message, [80, 110], 40, "Red")
canvas.draw_text(str(score)+"/"+str(attempt),[250, 20], 18, "Green")
# create frame
frame = simplegui.create_frame("Stopwatch: The Game",300,200)
# register event handlers
start = frame.add_button("Start", start_btn_handler)
stop = frame.add_button("Stop", stop_btn_handler)
reset = frame.add_button("Reset", reset_btn_handler)
timer = simplegui.create_timer(interval, tick)
frame.set_draw_handler(draw)
# start timer and frame
frame.start()
| apache-2.0 | 6,260,141,441,062,355,000 | 21.035714 | 72 | 0.622222 | false |
rohitwaghchaure/New_Theme_Erp | erpnext/accounts/page/report_template/report_template.py | 1 | 1480 | from __future__ import unicode_literals
import frappe
from frappe.utils.file_manager import save_file
import os, base64, re
import random
import json
@frappe.whitelist()
def add_node():
ctype = frappe.form_dict.get('ctype')
parent_field = 'parent_' + ctype.lower().replace(' ', '_')
name_field = ctype.lower().replace(' ', '_') + '_name'
fname, content = get_uploadedImage_content(frappe.form_dict['filedata'], frappe.form_dict['filename'])
if content:
image = save_file(fname, content, 'Product Catalog', frappe.form_dict['name_field'])
doc = frappe.new_doc(ctype)
doc.update({
name_field: frappe.form_dict['name_field'],
parent_field: frappe.form_dict['parent'],
"is_group": frappe.form_dict['is_group']
})
doc.save()
return "Done"
@frappe.whitelist()
def get_uploadedImage_content(filedata, filename):
filedata = filedata.rsplit(",", 1)[1]
uploaded_content = base64.b64decode(filedata)
return filename, uploaded_content
@frappe.whitelist()
def view_image():
name = frappe.form_dict.get('name')
return frappe.db.sql(""" SELECT file_url FROM `tabFile Data` WHERE
attached_to_name='%s' AND attached_to_doctype='Product Catalog'"""%(name))
@frappe.whitelist()
def webcam_img_upload(imgdata1,customer):
data =json.loads(imgdata1)
filename=random.randrange(1,100000,2)
filename=str(filename)+'.png'
fname, content = get_uploadedImage_content(imgdata1,filename)
if content:
image = save_file(fname, content,'Customer',customer)
return fname
| agpl-3.0 | 3,505,221,346,935,491,000 | 29.204082 | 103 | 0.717568 | false |
zstackio/zstack-woodpecker | zstackwoodpecker/zstackwoodpecker/zstack_test/zstack_test_image.py | 1 | 5502 | '''
zstack image test class
@author: Youyk
'''
import apibinding.inventory as inventory
import zstackwoodpecker.header.header as zstack_header
import zstackwoodpecker.header.image as image_header
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import random
class ZstackTestImage(image_header.TestImage):
def __init__(self):
super(ZstackTestImage, self).__init__()
self.image_creation_option = test_util.ImageOption()
self.original_checking_points = []
self.delete_policy = test_lib.lib_get_delete_policy('image')
self.delete_delay_time = test_lib.lib_get_expunge_time('image')
def create(self, apiid=None, root=True):
'''
Create image template from Root Volume using CreateRootVolumeTemplateFromRootVolume
'''
if test_lib.lib_check_version_is_mevoco_1_8():
if test_lib.lib_check_version_is_mevoco():
self.image = img_ops.commit_volume_as_image_apiid(self.image_creation_option, apiid)
else:
self.image = img_ops.create_root_volume_template_apiid(self.image_creation_option, apiid)
else:
if root:
self.image = img_ops.create_root_volume_template_apiid(self.image_creation_option, apiid)
else:
self.image = img_ops.create_data_volume_template(self.image_creation_option)
super(ZstackTestImage, self).create()
def delete(self):
img_ops.delete_image(self.image.uuid)
super(ZstackTestImage, self).delete()
def recover(self):
img_ops.recover_image(self.image.uuid)
super(ZstackTestImage, self).recover()
def expunge(self, bs_uuid_list = None):
img_ops.expunge_image(self.image.uuid, bs_uuid_list)
super(ZstackTestImage, self).expunge()
def update(self):
if self.get_state() != image_header.EXPUNGED:
updated_image = test_lib.lib_get_image_by_uuid(self.image.uuid)
if updated_image:
self.image = updated_image
else:
self.set_state(image_header.EXPUNGED)
return self.image
def clean(self):
if self.delete_policy != zstack_header.DELETE_DIRECT:
if self.get_state() == image_header.DELETED:
self.expunge()
elif self.get_state() == image_header.EXPUNGED:
pass
else:
self.delete()
self.expunge()
else:
self.delete()
def export(self):
bs_uuid = self.image_creation_option.get_backup_storage_uuid_list()[0]
return img_ops.export_image_from_backup_storage(self.image.uuid, bs_uuid)
def delete_exported_image(self):
bs_uuid = self.image_creation_option.get_backup_storage_uuid_list()[0]
return img_ops.delete_exported_image_from_backup_storage(self.image.uuid, bs_uuid)
def check(self):
import zstackwoodpecker.zstack_test.checker_factory as checker_factory
checker = checker_factory.CheckerFactory().create_checker(self)
checker.check()
super(ZstackTestImage, self).check()
def set_creation_option(self, image_creation_option):
self.image_creation_option = image_creation_option
def get_creation_option(self):
return self.image_creation_option
def create_data_volume(self, ps_uuid, name = None, host_uuid = None):
import zstackwoodpecker.header.volume as volume_header
import zstackwoodpecker.zstack_test.zstack_test_volume \
as zstack_volume_header
volume_inv = vol_ops.create_volume_from_template(self.get_image().uuid,\
ps_uuid, name, host_uuid)
volume = zstack_volume_header.ZstackTestVolume()
volume.set_volume(volume_inv)
volume.set_state(volume_header.DETACHED)
volume.set_original_checking_points(self.get_original_checking_points())
super(ZstackTestImage, self).create_data_volume()
return volume
def add_data_volume_template(self):
self.set_image(img_ops.add_data_volume_template(self.get_creation_option))
return self
def add_root_volume_template(self):
self.set_image(img_ops.add_root_volume_template(self.get_creation_option()))
return self
def add_root_volume_template_apiid(self, apiid):
self.set_image(img_ops.add_root_volume_template_apiid(self.get_creation_option(), apiid))
return self
def set_original_checking_points(self, original_checking_points):
'''
        If the template is created from a snapshot, it should inherit the
        snapshot's checking points. Otherwise the previous checking points
        will be lost when creating a new snapshot from a volume created from
        the current template.
'''
self.original_checking_points = original_checking_points
def get_original_checking_points(self):
return self.original_checking_points
def set_delete_policy(self, policy):
test_lib.lib_set_delete_policy(category = 'image', value = policy)
super(ZstackTestImage, self).set_delete_policy(policy)
def set_delete_delay_time(self, delay_time):
test_lib.lib_set_expunge_time(category = 'image', value = delay_time)
super(ZstackTestImage, self).set_delete_delay_time(delay_time)
| apache-2.0 | -3,450,384,789,116,033,000 | 39.455882 | 105 | 0.660851 | false |
jrabbit/ubotu-fr | src/drivers/__init__.py | 1 | 8608 | ###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2008-2009, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Contains various drivers (network, file, and otherwise) for using IRC objects.
"""
import sys
import time
import socket
import supybot.conf as conf
import supybot.utils as utils
import supybot.log as supylog
import supybot.ircmsgs as ircmsgs
_drivers = {}
_deadDrivers = []
_newDrivers = []
class IrcDriver(object):
"""Base class for drivers."""
def __init__(self, *args, **kwargs):
add(self.name(), self)
super(IrcDriver, self).__init__(*args, **kwargs)
def run(self):
raise NotImplementedError
def die(self):
# The end of any overrided die method should be
# "super(Class, self).die()", in order to make
# sure this (and anything else later added) is done.
remove(self.name())
def reconnect(self, wait=False):
raise NotImplementedError
def name(self):
return repr(self)
class ServersMixin(object):
def __init__(self, irc, servers=()):
self.networkGroup = conf.supybot.networks.get(irc.network)
self.servers = servers
super(ServersMixin, self).__init__()
def _getServers(self):
# We do this, rather than utils.iter.cycle the servers in __init__,
# because otherwise registry updates given as setValues or sets
# wouldn't be visible until a restart.
return self.networkGroup.servers()[:] # Be sure to copy!
def _getNextServer(self):
if not self.servers:
self.servers = self._getServers()
assert self.servers, 'Servers value for %s is empty.' % \
self.networkGroup._name
server = self.servers.pop(0)
self.currentServer = '%s:%s' % server
return server
def empty():
"""Returns whether or not the driver loop is empty."""
return (len(_drivers) + len(_newDrivers)) == 0
def add(name, driver):
"""Adds a given driver the loop with the given name."""
_newDrivers.append((name, driver))
def remove(name):
"""Removes the driver with the given name from the loop."""
_deadDrivers.append(name)
def run():
"""Runs the whole driver loop."""
for (name, driver) in _drivers.iteritems():
try:
if name not in _deadDrivers:
driver.run()
except:
            log.exception('Uncaught exception in drivers.run:')
_deadDrivers.append(name)
for name in _deadDrivers:
try:
driver = _drivers[name]
if hasattr(driver, 'irc') and driver.irc is not None:
# The Schedule driver has no irc object, or it's None.
driver.irc.driver = None
driver.irc = None
log.info('Removing driver %s.', name)
del _drivers[name]
except KeyError:
pass
while _newDrivers:
(name, driver) = _newDrivers.pop()
log.debug('Adding new driver %s.', name)
if name in _drivers:
log.warning('Driver %s already added, killing it.', name)
_drivers[name].die()
del _drivers[name]
_drivers[name] = driver
class Log(object):
"""This is used to have a nice, consistent interface for drivers to use."""
def connect(self, server):
self.info('Connecting to %s.', server)
def connectError(self, server, e):
if isinstance(e, Exception):
if isinstance(e, socket.gaierror):
e = e.args[1]
else:
e = utils.exnToString(e)
self.warning('Error connecting to %s: %s', server, e)
def disconnect(self, server, e=None):
if e:
if isinstance(e, Exception):
e = utils.exnToString(e)
else:
e = str(e)
if not e.endswith('.'):
e += '.'
self.warning('Disconnect from %s: %s', server, e)
else:
self.info('Disconnect from %s.', server)
def reconnect(self, network, when=None):
s = 'Reconnecting to %s' % network
if when is not None:
if not isinstance(when, basestring):
when = self.timestamp(when)
s += ' at %s.' % when
else:
s += '.'
self.info(s)
def die(self, irc):
self.info('Driver for %s dying.', irc)
debug = staticmethod(supylog.debug)
info = staticmethod(supylog.info)
warning = staticmethod(supylog.warning)
error = staticmethod(supylog.warning)
critical = staticmethod(supylog.critical)
timestamp = staticmethod(supylog.timestamp)
exception = staticmethod(supylog.exception)
log = Log()
def newDriver(irc, moduleName=None):
"""Returns a new driver for the given server using the irc given and using
conf.supybot.driverModule to determine what driver to pick."""
# XXX Eventually this should be made to load the drivers from a
# configurable directory in addition to the installed one.
if moduleName is None:
moduleName = conf.supybot.drivers.module()
if moduleName == 'default':
# XXX Twisted has been causing problems lately, so we're going to use
# the Socket driver by default, now. Leaving behind the code for using
# Twisted by default in case someone steps up and fixes the Twisted
# driver.
'''
try:
import supybot.drivers.Twisted
moduleName = 'supybot.drivers.Twisted'
except ImportError:
# We formerly used 'del' here, but 2.4 fixes the bug that we added
# the 'del' for, so we need to make sure we don't complain if the
# module is cleaned up already.
sys.modules.pop('supybot.drivers.Twisted', None)
moduleName = 'supybot.drivers.Socket'
'''
moduleName = 'supybot.drivers.Socket'
elif not moduleName.startswith('supybot.drivers.'):
moduleName = 'supybot.drivers.' + moduleName
driverModule = __import__(moduleName, {}, {}, ['not empty'])
log.debug('Creating new driver (%s) for %s.', moduleName, irc)
driver = driverModule.Driver(irc)
irc.driver = driver
return driver
def parseMsg(s):
start = time.time()
s = s.strip()
if s:
msg = ircmsgs.IrcMsg(s)
msg.tag('receivedAt', start)
preInFilter = str(msg).rstrip('\r\n')
n = preInFilter.find('PRIVMSG')
if n == -1:
n = preInFilter.find('NOTICE')
if n != -1:
n = preInFilter.find(':',n)
if n != -1:
n = n+1
if preInFilter[n] == '+' or preInFilter[n] == '-':
identified = preInFilter[n] == '+'
l = list(preInFilter)
l[n] = ''
preInFilter = ''.join(l)
msg = ircmsgs.IrcMsg(preInFilter)
msg.identified = identified
return msg
else:
return None
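# Example of the identify-msg handling above (hypothetical raw line):
# for ':nick!user@host PRIVMSG #chan :+hello', the '+' right after the
# trailing ':' is stripped from the text and msg.identified is set to True;
# a leading '-' would set it to False.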
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| bsd-3-clause | 7,512,690,381,171,431,000 | 35.320675 | 79 | 0.614893 | false |
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-certificates/azure/keyvault/certificates/__init__.py | 1 | 1350 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from ._client import CertificateClient
from ._enums import(
CertificatePolicyAction,
KeyCurveName,
KeyType,
CertificateContentType,
KeyUsageType,
WellKnownIssuerNames
)
from ._models import(
AdministratorContact,
CertificateContact,
CertificateIssuer,
CertificateOperation,
CertificateOperationError,
CertificatePolicy,
CertificateProperties,
DeletedCertificate,
IssuerProperties,
LifetimeAction,
KeyVaultCertificate,
KeyVaultCertificateIdentifier
)
from ._shared.client_base import ApiVersion
__all__ = [
"ApiVersion",
"CertificatePolicyAction",
"AdministratorContact",
"CertificateClient",
"CertificateContact",
"CertificateIssuer",
"CertificateOperation",
"CertificateOperationError",
"CertificatePolicy",
"CertificateProperties",
"DeletedCertificate",
"IssuerProperties",
"KeyCurveName",
"KeyType",
"KeyVaultCertificate",
"KeyVaultCertificateIdentifier",
"KeyUsageType",
"LifetimeAction",
"CertificateContentType",
"WellKnownIssuerNames",
"CertificateIssuer",
"IssuerProperties"
]
from ._version import VERSION
__version__ = VERSION
| mit | -2,634,080,617,875,437,000 | 23.107143 | 43 | 0.679259 | false |
gytdau/advent | Day14/part1.py | 1 | 1383 | # I wanted to try making some classes as practice.
RUNNING = True
RESTING = False
class Reindeer:
def __init__(self, line):
"""
:param line: Parses line into the class.
"""
line = line.split()
self.speed = int(line[3])
self.running_time = int(line[6])
self.resting_time = int(line[13])
def calculate_distance_at(self, time):
"""
:param time: Amount of time this race should continue for
:return: The distance this reindeer has run at the end of the race.
"""
state = RUNNING
distance = 0
state_timer = self.running_time
timer = time
for i in range(time):
if state == RUNNING:
distance += self.speed
state_timer -= 1
if state_timer <= 0:
if state == RUNNING:
state = RESTING
state_timer = self.resting_time
else:
state = RUNNING
state_timer = self.running_time
timer -= 1
if timer <= 0:
return distance
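# Rough usage sketch (hypothetical input line, the classic puzzle example,
# not necessarily a line from inputData.txt):
#   comet = Reindeer("Comet can fly 14 km/s for 10 seconds,"
#                    " but then must rest for 127 seconds.")
#   comet.calculate_distance_at(1000)  # -> 1120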
reindeer_distances = []
with open("inputData.txt", "r") as infile:
for line in infile:
testing = Reindeer(line)
reindeer_distances.append(testing.calculate_distance_at(2503))
print(str(max(reindeer_distances)))
| mit | -5,069,955,758,993,496,000 | 26.117647 | 75 | 0.529284 | false |
rwl/PyCIM | CIM14/CPSM/Equipment/Generation/Production/NuclearGeneratingUnit.py | 1 | 1571 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.Generation.Production.GeneratingUnit import GeneratingUnit
class NuclearGeneratingUnit(GeneratingUnit):
"""A nuclear generating unit.
"""
def __init__(self, *args, **kw_args):
"""Initialises a new 'NuclearGeneratingUnit' instance.
"""
super(NuclearGeneratingUnit, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = []
_many_refs = []
| mit | 8,462,518,251,095,309,000 | 39.282051 | 84 | 0.728199 | false |
remontees/EliteHebergPanel | users/views.py | 1 | 1594 | #-*- coding: utf-8 -*-
from django.utils.translation import ugettext as _
from users.forms import ConnexionForm
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.core.urlresolvers import reverse
from auth_remember import remember_user
from django.views.decorators.cache import cache_page
@cache_page(10)
def connexion(request):
error = False
if request.method == 'POST':
form = ConnexionForm(request.POST)
if form.is_valid():
username = form.cleaned_data["username"]
password = form.cleaned_data["password"]
remember = form.cleaned_data["remember"]
user = authenticate(username=username, password=password)
if user:
login(request, user)
# On retient l'utilisateur
if remember:
remember_user(request, user)
txtmessage = _('Vous êtes maintenant connecté.')
messages.add_message(request, messages.SUCCESS, txtmessage)
else:
error = True
else:
error = True
else:
form = ConnexionForm()
return render(request, 'users/connexion.html', {'form':form, 'error':error})
@cache_page(10)
def deconnexion(request):
logout(request)
txtmessage = _('Vous êtes maintenant déconnecté.')
messages.add_message(request, messages.SUCCESS, txtmessage)
return redirect(reverse(connexion))
| lgpl-3.0 | -3,155,211,552,788,402,700 | 33.543478 | 80 | 0.618628 | false |
adewinter/rmac | rmac/test.py | 1 | 3069 | #!/Users/adewinter/venv/rmac/bin/python
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.gridlayout import GridLayout
from kivy.uix.scrollview import ScrollView
from send import send_message as send
from pony.orm import *
from models import sms_received, sms_sent
@db_session
def add_entry_to_sent(number, message):
sms_sent(to=number, message=message)
def create_get_message_text(number, holder):
def get_message_text(instance):
text = TextInput(text=' ', multiline=False)
def send_message(instance):
print 'Text input text is: %s' % instance.text
print 'Sending to number: %s' % number
send(number, instance.text)
add_entry_to_sent(number, instance.text)
text.bind(on_text_validate=send_message)
holder.add_widget(text)
return get_message_text
def get_stored_messages():
received = sms_received.select().order_by(desc(sms_received.timestamp))
received.show()
return received
def make_message_widget(sender, text):
"""
Builds a GridLayout with appropriate label texts and buttons for actions.
"""
print 'Making widget for %s:%s' % (sender, text)
message = GridLayout(cols=1, size_hint_y=0.3)
buttons = GridLayout(cols=2, size_hint=(1,0.4))
reply_button = Button(text='Reply')
reply_button.bind(on_press=create_get_message_text(sender, message))
buttons.add_widget(reply_button)
buttons.add_widget(Button(text='Archive'))
header = Label(text='[b]Sender: %s[/b]' % sender,markup=True, font_size='20sp', size_hint=(1.0,0.3))
body = Label(text=text, size_hint=(1.0, 0.3))
message.add_widget(header)
message.add_widget(body)
message.add_widget(buttons)
return message
class TestApp(App):
@db_session
def build(self):
heading = Label(text='[color=ff3333]Message[/color][color=3333ff]Viewer[/color]',
markup=True, size_hint=(1.0, 0.10), height=50, font_size='20sp')
message_list = GridLayout(cols=1,size_hint_y=None, spacing=10,row_default_height=140, height=800)
message_list.bind(minimum_height=message_list.setter('height'))
# message_list.add_widget(heading)
# message_list.add_widget(heading)
#I don't know why I have to do this. It appears the ScrollView eats the first two widgets. So I add two dummy ones to offset
#it's horrible practice but frankly I don't care enough to fix it for real.
message_list.add_widget(make_message_widget('null', 'null'))
message_list.add_widget(make_message_widget('null', 'null'))
message_list.add_widget(heading)
for message in get_stored_messages():
m_widget = make_message_widget(message.sender, message.message)
print 'Widget made: %s:: %sx%s' % (m_widget, m_widget.width, m_widget.height)
message_list.add_widget(m_widget)
scroll_message_list=ScrollView(size_hint=(None, None), size=(800, 900))
scroll_message_list.add_widget(message_list)
# message_list.add_widget(make_message_widget('6176920786', 'This is the text of the message'))
# base.add_widget(scroll_message_list)
return scroll_message_list
TestApp().run() | gpl-2.0 | 4,017,797,271,676,093,400 | 37.375 | 127 | 0.730857 | false |
SalesforceFoundation/mrbelvedereci | metaci/api/serializers/build.py | 1 | 3283 | from rest_framework import serializers
from metaci.api.serializers.cumulusci import OrgSerializer
from metaci.api.serializers.cumulusci import ScratchOrgInstanceSerializer
from metaci.api.serializers.repository import BranchSerializer
from metaci.api.serializers.repository import RepositorySerializer
from metaci.api.serializers.plan import PlanSerializer
from metaci.build.models import Build
from metaci.build.models import BuildFlow
from metaci.build.models import Rebuild
from metaci.cumulusci.models import Org
from metaci.plan.models import Plan
from metaci.repository.models import Branch
from metaci.repository.models import Repository
class BuildFlowSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = BuildFlow
fields = (
"id",
"build",
"error_message",
"exception",
"flow",
"log",
"rebuild",
"status",
"tests_fail",
"tests_pass",
"tests_total",
"time_end",
"time_queue",
"time_start",
)
build_flow_related_fields = list(BuildFlowSerializer.Meta.fields)
build_flow_related_fields.remove("log")
class BuildFlowRelatedSerializer(BuildFlowSerializer):
class Meta(BuildFlowSerializer.Meta):
fields = build_flow_related_fields
class RebuildSerializer(serializers.HyperlinkedModelSerializer):
org_instance = ScratchOrgInstanceSerializer(read_only=True)
class Meta:
model = Rebuild
fields = (
"id",
"build",
"error_message",
"exception",
"org_instance",
"status",
"user",
"time_end",
"time_queue",
"time_start",
)
class BuildSerializer(serializers.HyperlinkedModelSerializer):
branch = BranchSerializer(read_only=True)
branch_id = serializers.PrimaryKeyRelatedField(
queryset=Branch.objects.all(), source="branch", write_only=True
)
flows = BuildFlowRelatedSerializer(many=True, read_only=True)
org = OrgSerializer(read_only=True)
org_id = serializers.PrimaryKeyRelatedField(
queryset=Org.objects.all(), source="org", write_only=True
)
org_instance = ScratchOrgInstanceSerializer(read_only=True)
plan = PlanSerializer(read_only=True)
plan_id = serializers.PrimaryKeyRelatedField(
queryset=Plan.objects.all(), source="plan", write_only=True
)
repo = RepositorySerializer(read_only=True)
repo_id = serializers.PrimaryKeyRelatedField(
queryset=Repository.objects.all(), source="repo", write_only=True
)
class Meta:
model = Build
fields = (
"id",
"branch",
"branch_id",
"commit",
"commit_message",
"current_rebuild",
"exception",
"error_message",
"flows",
"log",
"org",
"org_id",
"org_instance",
"plan",
"plan_id",
"pr",
"repo",
"repo_id",
"status",
"tag",
"time_end",
"time_queue",
"time_start",
)
| bsd-3-clause | 6,905,202,254,614,154,000 | 28.576577 | 73 | 0.600366 | false |
mankyd/htmlmin | htmlmin/python3html/__init__.py | 1 | 4954 | """
General functions for HTML manipulation.
"""
import re as _re
try:
from html.entities import html5 as _html5
unichr = chr
except ImportError:
import htmlentitydefs
_html5 = {'apos;':u"'"}
for k, v in htmlentitydefs.name2codepoint.iteritems():
_html5[k + ';'] = unichr(v)
__all__ = ['escape', 'unescape']
def escape(s, quote=True):
"""
Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true (the default), the quotation mark
characters, both double quote (") and single quote (') characters are also
translated.
"""
s = s.replace("&", "&") # Must be done first!
s = s.replace("<", "<")
s = s.replace(">", ">")
if quote:
s = s.replace('"', """)
s = s.replace('\'', "'")
return s
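# For example, escape('<a href="x">') returns
# '&lt;a href=&quot;x&quot;&gt;', while escape("it's", quote=False) leaves
# the apostrophe untouched.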
# see http://www.w3.org/TR/html5/syntax.html#tokenizing-character-references
_invalid_charrefs = {
0x00: '\ufffd', # REPLACEMENT CHARACTER
0x0d: '\r', # CARRIAGE RETURN
0x80: '\u20ac', # EURO SIGN
0x81: '\x81', # <control>
0x82: '\u201a', # SINGLE LOW-9 QUOTATION MARK
0x83: '\u0192', # LATIN SMALL LETTER F WITH HOOK
0x84: '\u201e', # DOUBLE LOW-9 QUOTATION MARK
0x85: '\u2026', # HORIZONTAL ELLIPSIS
0x86: '\u2020', # DAGGER
0x87: '\u2021', # DOUBLE DAGGER
0x88: '\u02c6', # MODIFIER LETTER CIRCUMFLEX ACCENT
0x89: '\u2030', # PER MILLE SIGN
0x8a: '\u0160', # LATIN CAPITAL LETTER S WITH CARON
0x8b: '\u2039', # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x8c: '\u0152', # LATIN CAPITAL LIGATURE OE
0x8d: '\x8d', # <control>
0x8e: '\u017d', # LATIN CAPITAL LETTER Z WITH CARON
0x8f: '\x8f', # <control>
0x90: '\x90', # <control>
0x91: '\u2018', # LEFT SINGLE QUOTATION MARK
0x92: '\u2019', # RIGHT SINGLE QUOTATION MARK
0x93: '\u201c', # LEFT DOUBLE QUOTATION MARK
0x94: '\u201d', # RIGHT DOUBLE QUOTATION MARK
0x95: '\u2022', # BULLET
0x96: '\u2013', # EN DASH
0x97: '\u2014', # EM DASH
0x98: '\u02dc', # SMALL TILDE
0x99: '\u2122', # TRADE MARK SIGN
0x9a: '\u0161', # LATIN SMALL LETTER S WITH CARON
0x9b: '\u203a', # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
0x9c: '\u0153', # LATIN SMALL LIGATURE OE
0x9d: '\x9d', # <control>
0x9e: '\u017e', # LATIN SMALL LETTER Z WITH CARON
0x9f: '\u0178', # LATIN CAPITAL LETTER Y WITH DIAERESIS
}
_invalid_codepoints = {
# 0x0001 to 0x0008
0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8,
# 0x000E to 0x001F
0xe, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
# 0x007F to 0x009F
0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
# 0xFDD0 to 0xFDEF
0xfdd0, 0xfdd1, 0xfdd2, 0xfdd3, 0xfdd4, 0xfdd5, 0xfdd6, 0xfdd7, 0xfdd8,
0xfdd9, 0xfdda, 0xfddb, 0xfddc, 0xfddd, 0xfdde, 0xfddf, 0xfde0, 0xfde1,
0xfde2, 0xfde3, 0xfde4, 0xfde5, 0xfde6, 0xfde7, 0xfde8, 0xfde9, 0xfdea,
0xfdeb, 0xfdec, 0xfded, 0xfdee, 0xfdef,
# others
0xb, 0xfffe, 0xffff, 0x1fffe, 0x1ffff, 0x2fffe, 0x2ffff, 0x3fffe, 0x3ffff,
0x4fffe, 0x4ffff, 0x5fffe, 0x5ffff, 0x6fffe, 0x6ffff, 0x7fffe, 0x7ffff,
0x8fffe, 0x8ffff, 0x9fffe, 0x9ffff, 0xafffe, 0xaffff, 0xbfffe, 0xbffff,
0xcfffe, 0xcffff, 0xdfffe, 0xdffff, 0xefffe, 0xeffff, 0xffffe, 0xfffff,
0x10fffe, 0x10ffff
}
def _replace_charref(s):
s = s.group(1)
if s[0] == '#':
# numeric charref
if s[1] in 'xX':
num = int(s[2:].rstrip(';'), 16)
else:
num = int(s[1:].rstrip(';'))
if num in _invalid_charrefs:
return _invalid_charrefs[num]
if 0xD800 <= num <= 0xDFFF or num > 0x10FFFF:
return '\uFFFD'
if num in _invalid_codepoints:
return ''
return unichr(num)
else:
# named charref
if s in _html5:
return _html5[s]
# find the longest matching name (as defined by the standard)
for x in range(len(s)-1, 1, -1):
if s[:x] in _html5:
return _html5[s[:x]] + s[x:]
else:
return '&' + s
_charref = _re.compile(r'&(#[0-9]+;?'
r'|#[xX][0-9a-fA-F]+;?'
r'|[^\t\n\f <&#;]{1,32};?)')
def unescape(s):
"""
Convert all named and numeric character references (e.g. >, >,
    &#x3e;) in the string s to the corresponding unicode characters.
This function uses the rules defined by the HTML 5 standard
for both valid and invalid character references, and the list of
HTML 5 named character references defined in html.entities.html5.
"""
if '&' not in s:
return s
return _charref.sub(_replace_charref, s)
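# Example (illustrative only, not part of the original module):
#   unescape('&lt;b&gt;caf&eacute;&lt;/b&gt; &#38; more')  ->  '<b>café</b> & more'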
| bsd-3-clause | -4,213,459,045,329,287,700 | 34.640288 | 78 | 0.589019 | false |
libor-m/scrimer | doc/conf.py | 1 | 8496 | # -*- coding: utf-8 -*-
#
# Scrimer documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 22 11:27:51 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Mocking modules to build docs on readthedocs
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
MOCK_MODULES = ['pysam', 'pybedtools', 'pyvcf', 'Bio', 'vcf']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../scripts'))
sys.path.insert(0, os.path.abspath('../scrimer'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Scrimer'
copyright = u'2013,2014 Libor Morkovsky'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Scrimerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Scrimer.tex', u'Scrimer Documentation',
u'Libor Morkovsky', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scrimer', u'Scrimer Documentation',
[u'Libor Morkovsky'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Scrimer', u'Scrimer Documentation',
u'Libor Morkovsky', 'Scrimer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| agpl-3.0 | -3,816,321,023,878,440,400 | 30.820225 | 80 | 0.689854 | false |
cloudify-cosmo/cloudify-gcp-plugin | cloudify_gcp/constants.py | 1 | 3961 | ########
# Copyright (c) 2014-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
MAX_GCP_NAME = 63
ID_HASH_CONST = 6
COMPUTE_SCOPE = 'https://www.googleapis.com/auth/compute'
MONITORING_SCOPE = 'https://www.googleapis.com/auth/monitoring'
STORAGE_SCOPE_RW = 'https://www.googleapis.com/auth/devstorage.read_write'
STORAGE_SCOPE_FULL = 'https://www.googleapis.com/auth/devstorage.full_control'
CONTAINER_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
PUB_SUB_SCOPE = 'https://www.googleapis.com/auth/pubsub'
LOGGING_SCOPE = 'https://www.googleapis.com/auth/logging.admin'
IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
CLOUDRESOURCES_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
COMPUTE_DISCOVERY = 'compute'
STORAGE_DISCOVERY = 'storage'
CONTAINER_DISCOVERY = 'container'
MONITORING_DISCOVERY = 'monitoring'
PUB_SUB_DISCOVERY = 'pubsub'
LOGGING_DISCOVERY = 'logging'
CLOUDRESOURCES_DISCOVERY = 'cloudresourcemanager'
IAM_DISCOVERY = 'iam'
CHUNKSIZE = 2 * 1024 * 1024
API_V1 = 'v1'
API_V2 = 'v2'
API_V3 = 'v3'
API_BETA = 'beta'
DISK = 'gcp_disk'
KUBERNETES_CLUSTER = 'gcp_kubernetes_cluster'
KUBERNETES_NODE_POOL = 'gcp_kubernetes_node_pool'
KUBERNETES_READY_STATUS = 'READY'
KUBERNETES_RUNNING_STATUS = 'RUNNING'
KUBERNETES_RECONCILING_STATUS = 'RECONCILING'
KUBERNETES_PROVISIONING_STATUS = 'PROVISIONING'
KUBERNETES_STOPPING_STATUS = 'STOPPING'
KUBERNETES_ERROR_STATUS = 'ERROR'
GCP_ZONE = 'gcp_zone'
HEALTH_CHECK_TYPE = 'gcp_health_check_type'
TARGET_PROXY_TYPE = 'gcp_target_proxy_type'
BACKENDS = 'gcp_backends'
IP = 'gcp_ip'
SELF_URL = 'selfLink'
ID = 'id'
TARGET_TAGS = 'targetTags'
SOURCE_TAGS = 'sourceTags'
PUBLIC_KEY = 'gcp_public_key'
PRIVATE_KEY = 'gcp_private_key'
USER = 'user'
SSH_KEYS = 'ssh_keys'
MANAGEMENT_SECURITY_GROUP = 'management_security_group'
MANAGER_AGENT_SECURITY_GROUP = 'manager_agent_security_group'
AGENTS_SECURITY_GROUP = 'agents_security_group'
SECURITY_GROUPS = [MANAGEMENT_SECURITY_GROUP,
MANAGER_AGENT_SECURITY_GROUP,
AGENTS_SECURITY_GROUP]
USE_EXTERNAL_RESOURCE = 'use_external_resource'
RESOURCE_ID = 'resource_id'
GCP_CONFIG = 'gcp_config'
AUTH = 'auth'
PROJECT = 'project'
ZONE = 'zone'
NETWORK = 'network'
NAME = 'name'
GCP_OP_DONE = 'DONE'
MANAGER_PLUGIN_FILES = os.path.join('/etc', 'cloudify', 'gcp_plugin')
GCP_DEFAULT_CONFIG_PATH = os.path.join(MANAGER_PLUGIN_FILES, 'gcp_config')
RETRY_DEFAULT_DELAY = 30
# Cloudify create node action
CREATE_NODE_ACTION = "cloudify.interfaces.lifecycle.create"
# Cloudify delete node action
DELETE_NODE_ACTION = "cloudify.interfaces.lifecycle.delete"
GCP_CREDENTIALS_SCHEMA = {
"type": "object",
"properties": {
"type": {"type": "string"},
"project_id": {"type": "string"},
"private_key_id": {"type": "string"},
"private_key": {"type": "string"},
"client_email": {"type": "string"},
"client_id": {"type": "string"},
"auth_uri": {"type": "string"},
"token_uri": {"type": "string"},
"auth_provider_x509_cert_url": {"type": "string"},
"client_x509_cert_url": {"type": "string"},
},
"required": ["type", "project_id", "private_key_id", "private_key",
"client_email", "client_id", "auth_uri",
"token_uri", "auth_provider_x509_cert_url",
"client_x509_cert_url"],
"additionalProperties": False
}
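# Example (illustrative only; assumes the third-party jsonschema package is
# available and that 'service_account.json' is a hypothetical key file):
#   import json, jsonschema
#   with open('service_account.json') as fh:
#       jsonschema.validate(json.load(fh), GCP_CREDENTIALS_SCHEMA)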
| apache-2.0 | 3,198,461,670,841,396,000 | 33.146552 | 78 | 0.690987 | false |
alu042/edx-platform | openedx/core/djangoapps/programs/tasks/v1/tasks.py | 1 | 9306 | """
This file contains celery tasks for programs-related functionality.
"""
from celery import task
from celery.utils.log import get_task_logger # pylint: disable=no-name-in-module, import-error
from django.conf import settings
from django.contrib.auth.models import User
from edx_rest_api_client.client import EdxRestApiClient
from lms.djangoapps.certificates.api import get_certificates_for_user, is_passing_status
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.credentials.utils import get_user_credentials
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.lib.token_utils import get_id_token
LOGGER = get_task_logger(__name__)
# Under cms the following setting is not defined, leading to errors during tests.
ROUTING_KEY = getattr(settings, 'CREDENTIALS_GENERATION_ROUTING_KEY', None)
def get_api_client(api_config, student):
"""
Create and configure an API client for authenticated HTTP requests.
Args:
api_config: ProgramsApiConfig or CredentialsApiConfig object
student: User object as whom to authenticate to the API
Returns:
EdxRestApiClient
"""
id_token = get_id_token(student, api_config.OAUTH2_CLIENT_NAME)
return EdxRestApiClient(api_config.internal_api_url, jwt=id_token)
def get_completed_courses(student):
"""
Determine which courses have been completed by the user.
Args:
student:
User object representing the student
Returns:
iterable of dicts with structure {'course_id': course_key, 'mode': cert_type}
"""
all_certs = get_certificates_for_user(student.username)
return [
{'course_id': unicode(cert['course_key']), 'mode': cert['type']}
for cert in all_certs
if is_passing_status(cert['status'])
]
def get_completed_programs(client, course_certificates):
"""
Given a set of completed courses, determine which programs are completed.
Args:
client:
programs API client (EdxRestApiClient)
course_certificates:
iterable of dicts with structure {'course_id': course_key, 'mode': cert_type}
Returns:
list of program ids
"""
return client.programs.complete.post({'completed_courses': course_certificates})['program_ids']
def get_awarded_certificate_programs(student):
"""
Find the ids of all the programs for which the student has already been awarded
a certificate.
Args:
student:
User object representing the student
Returns:
ids of the programs for which the student has been awarded a certificate
"""
return [
credential['credential']['program_id']
for credential in get_user_credentials(student)
if 'program_id' in credential['credential'] and credential['status'] == 'awarded'
]
def award_program_certificate(client, username, program_id):
"""
Issue a new certificate of completion to the given student for the given program.
Args:
client:
credentials API client (EdxRestApiClient)
username:
The username of the student
program_id:
id of the completed program
Returns:
None
"""
client.user_credentials.post({
'username': username,
'credential': {'program_id': program_id},
'attributes': []
})
@task(bind=True, ignore_result=True, routing_key=ROUTING_KEY)
def award_program_certificates(self, username):
"""
This task is designed to be called whenever a student's completion status
changes with respect to one or more courses (primarily, when a course
certificate is awarded).
It will consult with a variety of APIs to determine whether or not the
specified user should be awarded a certificate in one or more programs, and
use the credentials service to create said certificates if so.
This task may also be invoked independently of any course completion status
change - for example, to backpopulate missing program credentials for a
student.
Args:
username:
The username of the student
Returns:
None
"""
LOGGER.info('Running task award_program_certificates for username %s', username)
config = ProgramsApiConfig.current()
countdown = 2 ** self.request.retries
# If either programs or credentials config models are disabled for this
# feature, it may indicate a condition where processing of such tasks
# has been temporarily disabled. Since this is a recoverable situation,
# mark this task for retry instead of failing it altogether.
if not config.is_certification_enabled:
LOGGER.warning(
'Task award_program_certificates cannot be executed when program certification is disabled in API config',
)
raise self.retry(countdown=countdown, max_retries=config.max_retries)
if not CredentialsApiConfig.current().is_learner_issuance_enabled:
LOGGER.warning(
'Task award_program_certificates cannot be executed when credentials issuance is disabled in API config',
)
raise self.retry(countdown=countdown, max_retries=config.max_retries)
try:
try:
student = User.objects.get(username=username)
except User.DoesNotExist:
LOGGER.exception('Task award_program_certificates was called with invalid username %s', username)
# Don't retry for this case - just conclude the task.
return
# Fetch the set of all course runs for which the user has earned a
# certificate.
course_certs = get_completed_courses(student)
if not course_certs:
# Highly unlikely, since at present the only trigger for this task
# is the earning of a new course certificate. However, it could be
# that the transaction in which a course certificate was awarded
# was subsequently rolled back, which could lead to an empty result
# here, so we'll at least log that this happened before exiting.
#
# If this task is ever updated to support revocation of program
# certs, this branch should be removed, since it could make sense
# in that case to call this task for a user without any (valid)
# course certs.
LOGGER.warning('Task award_program_certificates was called for user %s with no completed courses', username)
return
# Invoke the Programs API completion check endpoint to identify any
# programs that are satisfied by these course completions.
programs_client = get_api_client(config, student)
program_ids = get_completed_programs(programs_client, course_certs)
if not program_ids:
# Again, no reason to continue beyond this point unless/until this
# task gets updated to support revocation of program certs.
return
# Determine which program certificates the user has already been
# awarded, if any.
existing_program_ids = get_awarded_certificate_programs(student)
except Exception as exc: # pylint: disable=broad-except
LOGGER.exception('Failed to determine program certificates to be awarded for user %s', username)
raise self.retry(exc=exc, countdown=countdown, max_retries=config.max_retries)
# For each completed program for which the student doesn't already have a
# certificate, award one now.
#
# This logic is important, because we will retry the whole task if awarding any particular program cert fails.
#
# N.B. the list is sorted to facilitate deterministic ordering, e.g. for tests.
new_program_ids = sorted(list(set(program_ids) - set(existing_program_ids)))
if new_program_ids:
try:
credentials_client = get_api_client(
CredentialsApiConfig.current(),
User.objects.get(username=settings.CREDENTIALS_SERVICE_USERNAME) # pylint: disable=no-member
)
except Exception as exc: # pylint: disable=broad-except
LOGGER.exception('Failed to create a credentials API client to award program certificates')
# Retry because a misconfiguration could be fixed
raise self.retry(exc=exc, countdown=countdown, max_retries=config.max_retries)
retry = False
for program_id in new_program_ids:
try:
award_program_certificate(credentials_client, username, program_id)
LOGGER.info('Awarded certificate for program %s to user %s', program_id, username)
except Exception: # pylint: disable=broad-except
# keep trying to award other certs, but retry the whole task to fix any missing entries
LOGGER.exception('Failed to award certificate for program %s to user %s', program_id, username)
retry = True
if retry:
# N.B. This logic assumes that this task is idempotent
LOGGER.info('Retrying task to award failed certificates to user %s', username)
raise self.retry(countdown=countdown, max_retries=config.max_retries)
| agpl-3.0 | -2,025,333,475,793,769,500 | 38.265823 | 120 | 0.678594 | false |
jaytaylor/django-pg-current-timestamp | django_pg_current_timestamp/__init__.py | 1 | 3439 | # -*- coding: utf-8 -*-
"""Adds django compatibility with / capability to use `CURRENT_TIMESTAMP` on DateTimeField objects."""
import logging
from psycopg2.extensions import ISQLQuote
__version__ = '0.2.4'
__author__ = 'Jay Taylor [@jtaylor]'
logger = logging.getLogger(__name__)
_current_timestamp_sql = 'CURRENT_TIMESTAMP'
class CurrentTimestamp(object):
def __str__(self):
return _current_timestamp_sql
def as_sql(self, qn, val):
return self.__str__(), {}
def __conform__(self, proto):
"""Does the given protocol conform to what Psycopg2 expects?"""
if proto == ISQLQuote:
return self
else:
raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')
def getquoted(self):
"""Returns a properly quoted string for use in PostgreSQL/PostGIS."""
# Psycopg will figure out whether to use E'\\000' or '\000'.
return self.__str__()
@classmethod
def patch(cls, field):
orig_pre_save = field.pre_save
orig_prep_db = field.get_db_prep_value
orig_prep_lookup = field.get_prep_lookup
orig_db_prep_lookup = field.get_db_prep_lookup
def pre_save(self, model_instance, add):
"""
Pre-save `CURRENT_TIMESTAMP` injector.
NB: The returned value is what will go into the database, and the `timezone.now()` value will be set on the model instance.
"""
from django.utils import timezone # Import here to avoid `pip install ..` issues.
if self.auto_now or (self.auto_now_add and add):
value = CurrentTimestamp()
setattr(model_instance, self.attname, timezone.now()) # Attach an approximate TS to the object.
return value
else:
return orig_pre_save(self, model_instance, add)
def prep_db_value(self, value, connection, prepared=False):
result = value if isinstance(value, cls) else orig_prep_db(self, value, connection, prepared)
#logger.debug('prep_db_value :: name={} type(result)={} result={}'.format(self.name, type(result), result))
return result
def prep_lookup(self, lookup_type, value):
result = value if isinstance(value, cls) else orig_prep_lookup(self, lookup_type, value)
#logger.debug('prep_lookup :: name={} type(result)={} result={}'.format(self.name, type(result), result))
return result
def prep_db_lookup(self, lookup_type, value, connection, prepared=True):
result = value if isinstance(value, cls) else orig_db_prep_lookup(self, lookup_type, value, connection=connection, prepared=True)
#logger.debug('prep_db_lookup :: name={} type(result)={} result={}'.format(self.name, type(result), result))
return result
field.pre_save = pre_save
field.get_db_prep_value = prep_db_value
field.get_prep_lookup = prep_lookup
field.get_db_prep_lookup = prep_db_lookup
def init():
"""Activation for automatic support of DateTimeField fields with `auto_now` and/or `auto_now_add` columns."""
from django.db.models import DateTimeField
logger.info('django_pg_current_timestamp :: Monkey-patching django.db.models.DateTimeField to enable automatic `CURRENT_TIMESTAMP` support for DateTimeField')
CurrentTimestamp.patch(DateTimeField)
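# Example (illustrative wiring, not part of this module): call init() once at
# startup, e.g. from an AppConfig.ready() hook or the end of settings.py, so
# DateTimeField columns with auto_now/auto_now_add are written with the
# database's CURRENT_TIMESTAMP instead of a client-generated timestamp.
#   import django_pg_current_timestamp
#   django_pg_current_timestamp.init()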
| bsd-3-clause | -4,983,507,088,101,478,000 | 38.988372 | 162 | 0.63565 | false |
ekarulf/pyactiveresource | src/tests/connection_test.py | 1 | 7742 | #!/usr/bin/env python
# Copyright 2008 Google Inc. All Rights Reserved.
'''Tests for connection objects.'''
__author__ = 'Mark Roach ([email protected])'
import unittest
import urllib2
from StringIO import StringIO
from pyactiveresource import connection
from pyactiveresource import util
from pyactiveresource.tests import http_fake
class Error(Exception):
pass
class ConnectionTest(unittest.TestCase):
def setUp(self):
'''Create test objects.'''
matz = {'id': 1, 'name': 'Matz'}
david = {'id': 2, 'name': 'David'}
self.matz = util.to_xml(matz, root='person')
self.david = util.to_xml(david, root='person')
self.people = util.to_xml([matz, david], root='people')
self.people_single = util.to_xml(
[matz], root='people-single-elements')
self.people_empty = util.to_xml([], root='people-empty-elements')
http_fake.initialize()
self.http = http_fake.TestHandler
self.http.site = 'http://localhost'
self.http.set_response(Error('Bad request'))
self.zero_length_content_headers = {'Content-Length': '0',
'Content-Type': 'application/xml'}
self.header = {'Key': 'value'}
self.connection = connection.Connection(self.http.site)
def assert_response_raises(self, error, code):
response = urllib2.HTTPError('', code, '', {}, StringIO(''))
self.http.set_response(response)
self.assertRaises(error, self.connection._open, '', '')
def test_handle_bad_request(self):
# 400 is a bad request (e.g. malformed URI or missing request parameter)
self.assert_response_raises(connection.BadRequest, 400)
def test_handle_valid_response(self):
# 2xx and 3xx are valid responses.
for code in [200, 299, 300, 399]:
response = http_fake.FakeResponse(code, str(code))
self.http.set_response(response)
self.assertEquals(self.connection._open('', ''),
connection.Response(code, str(code)))
def test_handle_unauthorized_access(self):
# 401 is an unauthorized request
self.assert_response_raises(connection.UnauthorizedAccess, 401)
def test_handle_forbidden_access(self):
        # 403 is a forbidden request (and authorizing will not help)
self.assert_response_raises(connection.ForbiddenAccess, 403)
def test_handle_resource_not_found(self):
# 404 is a missing resource.
self.assert_response_raises(connection.ResourceNotFound, 404)
def test_handle_method_not_allowed(self):
        # 405 is a method not allowed error
self.assert_response_raises(connection.MethodNotAllowed, 405)
def test_handle_resource_conflict(self):
# 409 is an optimistic locking error
self.assert_response_raises(connection.ResourceConflict, 409)
def test_handle_resource_invalid(self):
# 422 is a validation error
self.assert_response_raises(connection.ResourceInvalid, 422)
def test_handle_client_error(self):
# 4xx are client errors.
for code in [402, 499]:
self.assert_response_raises(connection.ClientError, code)
def test_handle_server_error(self):
# 5xx are server errors.
for code in [500, 599]:
self.assert_response_raises(connection.ServerError, code)
def test_handle_connection_error(self):
# Others are unknown.
for code in [199, 600]:
self.assert_response_raises(connection.ConnectionError, code)
def test_timeout_attribute(self):
self.connection.timeout = 7
self.assertEqual(7, self.connection.timeout)
def test_initialize_raises_argument_error_on_missing_site(self):
self.assertRaises(Exception, connection.Connection, None)
def test_get(self):
self.http.respond_to(
'GET', 'http://localhost/people/1.xml', {}, self.matz)
matz = self.connection.get('/people/1.xml')
self.assertEqual(matz['name'], 'Matz')
def test_head(self):
self.http.respond_to('HEAD', 'http://localhost/people/1.xml', {}, '')
self.assertFalse(self.connection.head('/people/1.xml').body)
def test_get_with_header(self):
self.http.respond_to(
'GET', 'http://localhost/people/2.xml', self.header, self.david)
david = self.connection.get('/people/2.xml', self.header)
self.assertEqual(david['name'], 'David')
def test_get_collection(self):
self.http.respond_to('GET', '/people.xml', {}, self.people)
people = self.connection.get('/people.xml')
self.assertEqual('Matz', people[0]['name'])
self.assertEqual('David', people[1]['name'])
def test_get_collection_single(self):
self.http.respond_to('GET', '/people_single_elements.xml', {},
self.people_single)
people = self.connection.get('/people_single_elements.xml')
self.assertEqual('Matz', people[0]['name'])
def test_get_collection_empty(self):
self.http.respond_to('GET', '/people_empty_elements.xml', {},
self.people_empty)
people = self.connection.get('/people_empty_elements.xml')
self.assertEqual([], people)
def test_post(self):
self.http.respond_to(
'POST', '/people.xml', self.zero_length_content_headers,
'', 200, {'Location': '/people/5.xml'})
response = self.connection.post('/people.xml')
self.assertEqual('/people/5.xml', response['Location'])
def test_post_with_header(self):
header = self.header
header.update(self.zero_length_content_headers)
self.http.respond_to(
'POST', '/members.xml', self.header,
'', 201, {'Location': '/people/6.xml'})
response = self.connection.post('/members.xml', self.header)
self.assertEqual('/people/6.xml', response['Location'])
def test_put(self):
self.http.respond_to('PUT', '/people/1.xml',
self.zero_length_content_headers, '', 204)
response = self.connection.put('/people/1.xml')
self.assertEqual(204, response.code)
def test_put_with_header(self):
header = self.header
header.update(self.zero_length_content_headers)
self.http.respond_to('PUT', '/people/2.xml', header, '', 204)
response = self.connection.put('/people/2.xml', self.header)
self.assertEqual(204, response.code)
def test_delete(self):
self.http.respond_to('DELETE', '/people/1.xml', {}, '')
response = self.connection.delete('/people/1.xml')
self.assertEqual(200, response.code)
def test_delete_with_header(self):
self.http.respond_to('DELETE', '/people/2.xml', self.header, '')
response = self.connection.delete('/people/2.xml', self.header)
self.assertEqual(200, response.code)
'''
ResponseHeaderStub = Struct.new(:code, :message, 'Allow')
def test_should_return_allowed_methods_for_method_no_allowed_exception
begin
handle_response ResponseHeaderStub.new(405, 'HTTP Failed...', 'GET, POST')
rescue connection.MethodNotAllowed => e
self.assertEqual('Failed with 405 HTTP Failed...', e.message
self.assertEqual([:get, :post], e.allowed_methods
uses_mocha('test_timeout') do
def test_timeout
@http = mock('new Net::HTTP')
self.connection.expects(:http).returns(@http)
@http.expects(:get).raises(Timeout::Error, 'execution expired')
assert_raises(connection.TimeoutError) { self.connection.get('/people_timeout.xml') }
'''
if __name__ == '__main__':
unittest.main()
| mit | -5,389,994,014,899,453,000 | 37.71 | 91 | 0.629424 | false |
LinuxChristian/pyTopoTools | pyTopoTools.py | 1 | 23401 | # Python package to perform quick analysis on topographic data
# Copyright (C) 2015 Christian Braedstrup
# This file is part of pyTopoTools.
#
# pyTopoTools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2 as published by
# the Free Software Foundation.
#
# pyTopoTools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyTopoTools. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
#import psd
import gdal
import osr  # used by outputGeoTiff when a projection string is supplied
import pdb
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.patches as patches
import operator
from scipy.io import loadmat
from matplotlib.colors import LogNorm
#from mpl_toolkits.mplot3d.axes3d import Axes3D
#import parallelTopoTools as ptt
import statsmodels.api as sm
import seaborn as sns
import copy
def detrend(M):
ny, nx = M.shape
X, Y = np.meshgrid(range(nx),range(ny))
A = np.vstack([np.ones(nx*ny) ,X.flatten(), Y.flatten()])
# Compute lstsq fit to plane
coeff, resid,rank,sigma = np.linalg.lstsq(A.T,(M.flatten()))
# at each (x,y) point, subtract the value of the fitted plane from M
# Zp = a + b*Y + c*X
P = (coeff[0] + coeff[2]*Y + coeff[1]*X)
D = M - P
return D, P
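# Example (illustrative only, not part of the original module): removing the
# best-fit plane from a purely planar surface should leave a ~zero residual.
#   Z = 5.0 + 0.1*np.outer(np.arange(50), np.ones(60))   # synthetic ramp
#   D, P = detrend(Z)
#   print(np.allclose(D, 0.0, atol=1e-8), P.shape)       # True (50, 60)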
def Hann2D(M):
ny, nx = M.shape
a = (nx+1)/2.0
b = (ny+1)/2.0 # matrix coordinates of centroid of M
[X, Y] = np.meshgrid(range(nx), range(ny))
theta = (X==a)*(np.pi/2)
theta += (X!=a)*np.arctan2((Y-b),(X-a)) # angular polar coordinate
r = np.sqrt(pow2(Y-b) + pow2(X-a)) # radial polar coordinate
r1 = pow2(b)*pow2(np.cos(theta)) + pow2(a)*pow2(np.sin(theta))
r2 = pow2(a)*pow2(b)*np.power(r1,-1)
rprime = np.sqrt(r2) # 'radius' of ellipse for this theta
hanncoeff = (r < rprime)*(0.5*(1 + np.cos(np.pi*r/rprime)));
H = M*hanncoeff;
Wss = np.sum(np.sum(pow2(hanncoeff)));
return [H, Wss]
def filterDEM(Z, dx, f, filttype):
'''
Filter the matrix Z and return
'''
Ny, Nx = Z.shape
    # pad to a square grid; use the larger dimension so Ny > Nx also works
    N = max(Nx, Ny)
    Zp = np.zeros((N, N))
    Zp[0:Ny, 0:Nx] = Z
Pm, fm, Pv, fv = powerspectrum(Zp,dx,pad=0,window=0)
F = zfilter(fm,f,filttype)
ft = np.fft.fftshift(np.fft.fft2(Zp))
Zf = np.real(np.fft.ifft2(np.fft.ifftshift(ft*F)))
return Zf[0:Ny,0:Nx], F
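# Example (illustrative only): low-pass filter a 10 m DEM so that wavelengths
# longer than ~2 km keep full weight and shorter ones roll off towards 1 km.
# For 'lowpass', f = [flo, fhi] gives the two corner frequencies in 1/m.
#   Zsmooth, F = filterDEM(Z, dx=10.0, f=[1.0/2000, 1.0/1000], filttype='lowpass')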
def detectLowRelief(Z,wstep=5,lrlim=500.0,elelim=1000.0):
'''
Given a topography matrix this function returns
a binary map of low relief surfaces at high elevation
input:
---------
Z: 2D Topography matrix
wstep: Steps of box width to compute (i.e. a list of widths to compute)
lrlim: Limit for what a low relief is
elelim: Cut off level for when surfaces become too low in elevation
output:
---------
    Zbin: Per-cell count of how many window widths in wstep give local relief below lrlim (0 where elevation < elelim)
'''
Zbin = np.zeros(Z.shape)
for w in wstep:
print(w)
ZLoc = localRelief2D(Z,w)
Zbin[ZLoc < lrlim] += 1
Zbin[Z < elelim] = 0
return Zbin
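# Example (illustrative only): count, for three window half-widths, how often a
# cell above 1000 m has less than 500 m of geophysical relief.
#   Zcount = detectLowRelief(Z, wstep=[5, 10, 20], lrlim=500.0, elelim=1000.0)
#   surfaces = Zcount == 3      # low relief at every tested window size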
def plotLowRelief(Z,ZLowRe,boxsize,ext=None,cmap=None,mlab=False,fax=None):
'''
Plots the results of detectLowRelief
INPUT:
------
Z: 2D Topography matrix
ZLowRe: Output from detectLowRelief
boxsize: Size of boxes used in meters
    Z0: Initial Topography matrix (optional)
ZLow0: Initial low relief matrix
ext: extent of model
cmap: Colormap
mlab: 3D plotting with mayavi
    fax: Axes handle to plot into (a new figure is created if None)
'''
if ext is None:
Ny, Nx = Z.shape
ext = [0, Ny, Nx, 0]
if cmap is None:
cmap=plt.get_cmap('jet')
if mlab:
print("Still no 3D function")
else:
if fax is None:
fig = plt.figure()
fax = fig.add_subplot(111)
fax.matshow(hillshade(Z,315,65),extent=ext,cmap=plt.get_cmap('bone'))
im_bed = fax.imshow(Z,extent=ext,cmap=cmap_center_adjust(plt.get_cmap('terrain'), 0.65),alpha=0.8)
z_masked = np.ma.masked_where(ZLowRe < 1 , ZLowRe)
plt.hold(True)
plt.title('Geophysical relief')
im_gr = fax.imshow(z_masked,extent=ext,cmap=cmap,vmin=1.0,vmax=len(boxsize))
if 'fig' in locals():
cax = fig.colorbar(im_gr,orientation='horizontal')
cax.set_ticks(np.arange(len(boxsize)+1))
cax.set_ticklabels(boxsize)
plt.show()
def zfilter(fmat, f, filttype):
if (filttype is 'lowpass'):
flo = f[0]; fhi = f[1];
mu=flo;
sigma=np.abs(fhi-flo)/3;
F=Gaussian(fmat,mu,sigma);
F[fmat<flo]=1;
elif (filttype is 'highpass'):
flo = f[0]; fhi = f[1];
mu=fhi;
sigma=np.abs(fhi-flo)/3;
F=Gaussian(fmat,mu,sigma);
F[fmat>=fhi]=1;
elif (filttype is 'bandpass'):
flo1 = f[0]; flo2 = f[1];
fhi1 = f[2]; fhi2 = f[3];
sigmalo = np.abs(flo2-flo1)/3;
sigmahi = np.abs(fhi2-fhi1)/3;
mulo=flo2;
muhi=fhi1;
Flo=Gaussian(fmat,mulo,sigmalo);
Fhi=Gaussian(fmat,muhi,sigmahi);
        F = Flo*(fmat<=mulo) + Fhi*(fmat>=muhi) + 1.0*np.logical_and(fmat>mulo, fmat<muhi);
elif (filttype is 'orientation'):
# F is the radial frequency matrix
# Slice away frequencies between f[0] and
# f
Ny, Nx = fmat.shape
x = np.linspace(0,1,Nx/2)
y = np.linspace(0,1,Ny/2)
X,Y = np.meshgrid(x,y)
theta = np.zeros(fmat.shape)
theta[:Ny/2,Nx/2:Nx] = np.rad2deg(np.arctan(np.rot90(Y/X))) # 0 - 90
theta[Ny/2:Ny,Nx/2:Nx] = np.rad2deg(np.arctan(Y/X))+90.0 # 90 - 180
theta[Ny/2:Ny,:Nx/2] = np.rad2deg(np.arctan(np.rot90(Y/X,3)))+180.0 # 180 - 270
theta[:Ny/2,:Nx/2] = np.rad2deg(np.arctan(np.rot90(Y/X,2)))+270.0 # 270 - 360
F = np.zeros(fmat.shape)
F[np.where(np.logical_and(theta>=f[0],theta<=f[1]))] = 1.0
F[np.where(np.logical_and(theta>=f[0]+180.0,theta<=f[1]+180.0))] = 1.0
return F
# Color adjust code found on this page
# https://sites.google.com/site/theodoregoetz/notes/matplotlib_colormapadjust
def cmap_powerlaw_adjust(cmap, a):
'''
returns a new colormap based on the one given
but adjusted via power-law:
newcmap = oldcmap**a
'''
if a < 0.:
return cmap
cdict = copy.copy(cmap._segmentdata)
fn = lambda x : (x[0]**a, x[1], x[2])
for key in ('red','green','blue'):
        cdict[key] = sorted(map(fn, cdict[key]))
        assert cdict[key][0][0] >= 0 and cdict[key][-1][0] <= 1, \
            "Resulting indices extend out of the [0, 1] segment."
return colors.LinearSegmentedColormap('colormap',cdict,1024)
def cmap_center_adjust(cmap, center_ratio):
'''
returns a new colormap based on the one given
but adjusted so that the old center point higher
(>0.5) or lower (<0.5)
'''
if not (0. < center_ratio) & (center_ratio < 1.):
return cmap
a = np.log(center_ratio) / np.log(0.5)
return cmap_powerlaw_adjust(cmap, a)
def cmap_center_point_adjust(cmap, range, center):
'''
converts center to a ratio between 0 and 1 of the
range given and calls cmap_center_adjust(). returns
a new adjusted colormap accordingly
'''
if not ((range[0] < center) and (center < range[1])):
return cmap
return cmap_center_adjust(cmap,
abs(center - range[0]) / abs(range[1] - range[0]))
def localReliefBand(Z,cent,width,dim=0):
'''
Computes the local relief along a row/column in the
matrix.
input:
---------
Z: 2D Topography matrix
cent: Center of the band (pixel)
width: Width/2 of the band (pixel)
dim: Dimension to compute along (0/1)
output:
---------
bmin: minimum along the band
bmax: maximum along the band
bmean: mean along the band
blr: local relief along the band
'''
if dim:
# Compute along second dimension in matrix
bmax = np.max(Z[:,cent-width:cent+width],axis=dim)
bmin = np.min(Z[:,cent-width:cent+width],axis=dim)
bmean = np.mean(Z[:,cent-width:cent+width],axis=dim)
blr = bmax-bmin
else:
# Compute along first dimension in matrix
bmax = np.max(Z[cent-width:cent+width,:],axis=dim)
bmin = np.min(Z[cent-width:cent+width,:],axis=dim)
bmean = np.mean(Z[cent-width:cent+width,:],axis=dim)
blr = bmax-bmin
return bmin,bmax,bmean,blr
def test_localRelief():
Z = np.matrix([
[1,1,1,1,1,1,1,1,1],
[1,2,2,2,2,2,2,2,1],
[1,2,3,3,3,3,3,2,1],
[1,2,3,4,4,4,3,2,1],
[1,2,3,4,5,4,3,2,1],
[1,2,3,4,4,4,3,2,1],
[1,2,3,3,3,3,3,2,1],
[1,2,2,2,2,2,2,2,1],
[1,1,1,1,1,1,1,1,1]
])
assert 1 == localRelief(Z,[4,4],1,9,9)
assert 2 == localRelief(Z,[4,4],2,9,9)
assert 3 == localRelief(Z,[4,4],3,9,9)
assert 4 == localRelief(Z,[4,4],4,9,9)
assert 0.0 == localRelief(Z,[4,4],0,9,9)
#@profile
def localRelief(Z,c,w,Nx,Ny):
'''
Given a center point in pixel and a box width this function
computes the local relief with that rectangle.
Notice that w is the distance in each direction. The
box width is therefore 2*w. (i.e. w=1 is the area within 1 pixel
from the center c).
input:
-------------
Z: Topography matrix
c: (x,y) of center point in pixel
w: Width of area to compute local relief within (halv box width)
Nx: Number of cells in x
Ny: Number of cells in y
'''
# Boundary conditions
xl = c[0]-w if c[0]-w > 0 else 0
xr = c[0]+w if c[0]+w < Nx else Nx
yl = c[1]-w if c[1]-w > 0 else 0
yr = c[1]+w if c[1]+w < Ny else Ny
sli = Z[yl:yr,xl:xr]
if (len(sli) > 0):
# return ptt.mima(sli)
# return np.max(sli)-np.min(sli)
return np.amax(sli)-np.amin(sli)
else:
return 0.0
def test_localRelief2D():
Z = np.matrix([
[1,1,1,1,1,1,1,1,1],
[1,2,2,2,2,2,2,2,1],
[1,2,3,3,3,3,3,2,1],
[1,2,3,4,4,4,3,2,1],
[1,2,3,4,5,4,3,2,1],
[1,2,3,4,4,4,3,2,1],
[1,2,3,3,3,3,3,2,1],
[1,2,2,2,2,2,2,2,1],
[1,1,1,1,1,1,1,1,1]
])
b = localRelief2D(Z,4)
assert 4 == b[4,4]
def localRelief2D(Z,width=5,walti=False):
'''
Computes the local relief using a window function
with default width of 5 px in each direction.
The window function is compute by the function localRelief.
input:
-------------
Z: Topography matrix
width: Width of area to compute local relief within
walti: Compute mean relief in altitude bands
'''
Ny,Nx = Z.shape
Zloc = np.ones(Z.shape)*1e4 # Start with a high local relief everywhere
d = width
# print(Ny,Nx)
for x in np.linspace(d,Nx-d,Nx-2*d):
for y in np.linspace(d,Ny-d,Ny-2*d):
Zloc[y,x] = localRelief(Z,[x,y],d,Nx,Ny)
# Group relief into altitude bands of 50 meters
if walti:
gmin = np.amin(Z)
gmin = gmin-np.mod(gmin,100)+300.0
gmax = np.amax(Z)
gmax = gmax-np.mod(gmax,100)-100.0
rrange = np.arange(gmin,gmax,50.0)
        Zele = np.zeros( (len(rrange),2) ) # Matrix to hold elevation and mean local relief
# Only work with even 100 numbers
for i in range(len(rrange)):
if i == 0:
Zele[0,0] = rrange[0]
Zele[0,1] = np.mean(Zloc[Z < rrange[0]])
elif i == len(rrange)-1:
Zele[-1,0] = rrange[-1]
Zele[-1,1] = np.mean(Zloc[Z > rrange[-1]])
else:
Zele[i,0] = rrange[i]
Zele[i,1] = np.mean(Zloc[np.where(np.logical_and(Z>rrange[i],Z<rrange[i+1]))])
return Zloc, Zele
else:
return Zloc
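# Example (illustrative only): with walti=True the mean geophysical relief is
# also returned binned into 50 m elevation bands.
#   Zloc, Zele = localRelief2D(Z, width=10, walti=True)
#   plt.plot(Zele[:, 1], Zele[:, 0])    # mean relief vs. elevation band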
def test_boxRelief():
Z = np.matrix([
[1,1,1,1,1,1,1,1,1],
[1,2,2,2,2,2,2,2,1],
[1,2,3,3,3,3,3,2,1],
[1,2,3,4,4,4,3,2,1],
[1,2,3,4,5,4,3,2,1],
[1,2,3,4,4,4,3,2,1],
[1,2,3,3,3,3,3,2,1],
[1,2,2,2,2,2,2,2,1],
[1,1,1,1,1,1,1,1,1]
])
assert 4 == boxRelief(Z,1,1,[4,4],4,rn=2,evenly=True)
def boxRelief(Z,dx,dy,c,w=5,rn=10,evenly=False,plot=False):
'''
Computes relief within a box of
width w (cells) with rn intervals
centered around c [xc,yc].
INPUT
------
Z: 2D Topography matrix
dx: X spacing
dy: Y spacing
c: Box center point
w: Box width
rn: Number of box increases
evenly: Grow box evenly or just in one direction
plot: Plot result
'''
Nx,Ny = Z.shape
L = dx*Nx; H = dy*Ny;
lrelief = np.zeros((rn-1))
# Evenly distribute
if evenly is True:
for i in np.arange(1,rn):
lrelief[i-1] = localRelief(Z,c,w*i,Nx,Ny)
else:
for i in np.arange(1,rn):
# Boundary conditions
xl = c[0] if w > 0 else c[0]+i*w
xr = c[0]+i*w if w > 0 else c[0]
yl = c[1] if w > 0 else c[1]+i*w
yr = c[1]+i*w if w > 0 else c[1]
sli = Z[yl:yr,xl:xr]
if (len(sli) > 0):
lrelief[i-1] = np.amax(sli)-np.amin(sli)
if plot:
fig = plt.figure()
ax1 = fig.add_subplot(211)
plt.plot(np.arange(1,rn)*np.sqrt(dx*dx+dy*dy),lrelief,'-*')
plt.title('Box relief')
plt.ylabel('Relief')
plt.xlabel('Box width (m)')
ax2 = fig.add_subplot(212)
plt.imshow(Z)
plt.hold(True)
for i in np.arange(1,rn,2):
ax2.add_patch(patches.Rectangle(c,i*w,i*w,fill=None,color='k'))
plt.colorbar()
plt.show()
print(lrelief)
return lrelief
def bslope(Z,dx=1,dy=1,cmin=0.0,cmax=1e10):
'''
Computes the local bed slope as
the laplacian for the bed.
'''
Zgx, Zgy = np.gradient(Z,dx,dy)
if cmin != 0.0 or cmax != 1e10:
return np.clip(np.sqrt(pow2(Zgx)+pow2(Zgy)),cmin,cmax)
else:
return np.sqrt(pow2(Zgx)+pow2(Zgy))
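# Example (illustrative only): slope magnitude on a 30 m grid, clipped at a
# gradient of 1.0 (45 degrees) to suppress edge artefacts.
#   S = bslope(Z, dx=30.0, dy=30.0, cmax=1.0)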
def analyseDEM(Z,dx,dy,forceshow=False,w=5,title='',trans=False,corner=[0.0,0.0],divcmap=None,seqcmap=None):
'''
Wrapper function to provide a quick
overview of a DEM.
Plots shadedrelief, local relief and
bed gradient.
INPUT:
--------
Z: 2D Topography Matrix
dx: Cell width in x-dimension
dy: Cell width in y-dimension
forceshow: Show plot after function call
w: Box width for geophysical relief
title: Super title for plot
trans: Transpose Z matrix for plotting
corner: Coordinates of (x0,y0)
cmap: custom colormap
'''
if trans:
Z = Z.T
Ny, Nx = Z.shape
x0 = corner[0]
x1 = x0 + Nx*dx
x1 = x1/1e3 # to km
y0 = corner[1]
y1 = y0 + Ny*dy
y1 = y1/1e3 # to km
# Use custom Seaborn color palettes
if divcmap is None:
# Diverging cmap
divcmap = sns.diverging_palette(220, 20, n=7, as_cmap=True)
if seqcmap is None:
# Sequential cmap
seqcmap = sns.light_palette("green", reverse=True, as_cmap=True)
plt.figure()
plt.subplot(221)
plt.imshow(Z,cmap=plt.get_cmap('terrain'),extent=[y0, y1, x1, x0],aspect='auto')
plt.title("Elevation",fontsize=12)
plt.ylabel('Km')
plt.colorbar(orientation='horizontal')
plt.subplot(222)
Zloc = localRelief2D(Z,w)
plt.imshow(Zloc,vmax=150,extent=[y0, y1, x1, x0],aspect='auto',cmap=seqcmap)
plt.title("Local relief - Radius %i"%(np.sqrt(pow2(w*dx)+pow2(w*dy))))
plt.ylabel('Km')
plt.colorbar(orientation='horizontal')
plt.subplot(223)
Zslo = bslope(Z,dx,dy,cmax=1.0)
plt.imshow(Zslo,vmax=0.3,extent=[y0, y1, x1, x0],aspect='auto',cmap=divcmap)
plt.title("Gradient")
plt.ylabel('Km')
plt.colorbar(orientation='horizontal')
plt.subplot(224)
ZlocZ = np.clip(Zloc/Z,0.0,1)
plt.imshow(ZlocZ,extent=[y0, y1, x1, x0],aspect='auto',cmap=divcmap)
plt.ylabel('Km')
plt.title("Local Relief/Elevation")
plt.colorbar(orientation='horizontal')
plt.tight_layout()
if forceshow:
plt.show()
return Zloc, Zslo, ZlocZ
def PowerspecOverview(Z,dx,dy=0):
'''
Wrapper function to give a quick overview
of the topography
'''
Z, P = detrend(Z)
Pm0, fm0, Pv0, fv0 = powerspectrum(Z,dx,pad=1,window=1)
xc, Bp, ppoly = denoise(fv0,Pv0,remove=3)
ffit = ppoly.coeffs # c + a*x, [a,c]
print(ffit)
# Plot results
plt.figure()
ax1 = plt.subplot(211)
plt.loglog(fv0,Pv0,'ko')
plt.hold(True)
plt.loglog(np.power(10,xc),np.power(10,Bp),'ro')
# plt.plot(xc,(np.power(10,ffit[0])*np.power(xc,ffit[1])),'r-')
plt.plot(np.power(10,xc[3:]),np.power(10,ffit[0]*xc[3:]+ffit[1]),'r-')
# xrang = ax1.get_xticks()
# x2rang = np.divide(1.0,xrang)
# ax2 = ax1.twiny()
# print(x2rang)
# ax1.set_xticks(xrang,str(x2rang))
ax1.set_xlabel('frequency (1/m)', color='k')
# ax2.set_xlim(x2rang)
ax1.set_xscale('log')
plt.ylabel('Mean-squared amplitude (m^2)')
plt.title('Full 1D Powerspectrum')
ax1 = plt.subplot(212)
Pmm = Pm0/(np.power(10,ffit[1])*np.power(fm0,ffit[0]))
plt.plot(fm0,Pmm,'k*')
ax1.set_xscale('log')
plt.title('Filtered 1D Powerspectrum')
plt.xlabel('frequency (1/m)')
plt.ylabel('Normalized power')
plt.tight_layout()
plt.figure()
# [400:600,400:600]
plt.imshow(np.log10(Pmm),norm=LogNorm(0.1,1))
plt.colorbar()
plt.title('2D Powerspectrum')
Nx, Ny = Pm0.shape
L = Nx*dx
H = Ny*dy
    nq = 1.0/(2.0*dx) # Nyquist frequency for the grid spacing dx
plt.xticks(np.linspace(0,Nx,10),np.linspace(-nq,nq,10))
plt.show()
def plotDEM(Z,hshade=False,t=''):
if (hshade is True):
plt.matshow(hillshade(Z,315,45),cmap='Greys')
else:
plt.matshow(Z)
plt.title(t)
plt.colorbar()
plt.show()
def plotVariable(varname,cmap=None,trans=False,title=''):
'''
Plot the variable from SPM output files
INPUT:
-------
var: variable to plot
clim: color limits
'''
if cmap is None:
cmap = sns.cubehelix_palette(8,as_cmap=True)
if trans:
varname = varname.T
plt.figure()
ax = plt.imshow(varname,cmap=cmap)
plt.title(title)
cbar = plt.colorbar(ax)
def outputGeoTiff(Z,outfile,trans,dim,prj=None):
"""
Outputs a matrix Z as a geotiff file.
INPUT
---------
Z: 2D Topography Matrix
outfile: Path to output file
    trans: GDAL geotransform [x0, dx, 0.0, y0, 0.0, dy]
dim: [Nx,Ny]
    prj: Projection information
"""
output_raster = gdal.GetDriverByName('GTiff').Create(outfile, dim[0], dim[1], 1 ,gdal.GDT_Float32)
output_raster.SetGeoTransform(trans) # Specify its coordinates
if prj is not None:
srs = osr.SpatialReference(wkt=prj) # Establish its coordinate encoding
output_raster.SetProjection( srs.ExportToWkt() ) # Exports the coordinate system to the file
output_raster.GetRasterBand(1).WriteArray(Z) # Writes my array to the raster
output_raster = None
def hillshade(array, azimuth, angle_altitude):
x, y = np.gradient(array)
slope = np.pi/2. - np.arctan(np.sqrt(x*x + y*y))
aspect = np.arctan2(-x, y)
azimuthrad = azimuth*np.pi / 180.
altituderad = angle_altitude*np.pi / 180.
shaded = np.sin(altituderad) * np.sin(slope)\
+ np.cos(altituderad) * np.cos(slope)\
* np.cos(azimuthrad - aspect)
return 255*(shaded + 1)/2
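# Example (illustrative only): grey-scale shaded relief lit from the NW.
#   plt.imshow(hillshade(Z, azimuth=315, angle_altitude=45), cmap='Greys')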
def denoise(fv,Pv,remove=0):
'''
Remove noise by using a power-law fit
'''
fv10 = np.log10(fv)
nbin = 20; # Number of bins
Bf,bedge = np.histogram(fv10,bins=nbin); # Bin the log-transformed data
# Compute bin centers
xc = np.array([ np.mean([bedge[i],bedge[i+1]]) for i in range(len(bedge)-1)])
Bp = np.zeros(len(Bf))
for i in range(0,len(bedge)-1):
if i==0:
Bp[i] = np.mean(Pv[fv10<bedge[i]])
elif i==len(bedge)-1:
Bp[i] = np.mean(Pv[fv10>bedge[i]])
else:
Bp[i] = np.mean(Pv[np.where(np.logical_and(fv10>bedge[i-1],fv10<bedge[i]))])
Bp = np.log10(Bp)
Bp[np.isnan(Bp)]=0.0
psqrt = np.polyfit(xc[remove:].flatten(),Bp[remove:].flatten(),1)
# if (remove == -1):
# A = np.vstack([np.zeros(nbin) ,xc.flatten()])
# else:
# A = np.vstack([np.zeros(nbin-remove) ,xc[remove:].flatten()])
# print(xc[remove:].flatten())
# Compute lstsq fit to line
# coeff, resid,rank,sigma = np.linalg.lstsq(A.T,(Bp[remove:].flatten()))
return xc, Bp, np.poly1d(psqrt)
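# Example (illustrative only): fit a power law to the binned 1D spectrum,
# ignoring the three lowest-frequency bins, then evaluate the fit.
#   xc, Bp, ppoly = denoise(fv, Pv, remove=3)
#   Pfit = np.power(10, ppoly(xc))      # back-transform from log10 space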
def Gaussian(freqmat,mu,sigma):
G=np.exp(-pow2(freqmat-mu)/(2*pow2(sigma)))
G=G/np.amax(G)
return G
def radial_profile(data, center):
y, x = np.indices(data.shape)
r = np.sqrt((x - center[0])**2 + (y - center[1])**2)
r = r.astype(np.int)
tbin = np.bincount(r.ravel(), data.ravel())
nr = np.bincount(r.ravel())
radialprofile = tbin / nr
return radialprofile
def show(M,typ=0):
ny, nx = M.shape
X, Y = np.meshgrid(range(nx), range(ny))
fig = plt.figure()
if typ == 1:
ax = fig.add_subplot(1,1,1,projection='3d')
ax.plot_surface(X,Y,M)
else:
plt.pcolormesh(M)
plt.colorbar()
plt.show()
def pow2(x):
return np.power(x,2)
def powerspectrum(M, dx, dy=0, pad=0, window=0):
if dy==0:
dy=dx
ny, nx = M.shape
# Data padding
if pad:
# calculate the power of 2 to pad with zeros
Lx = int(np.power(2,(np.ceil(np.log(np.max([nx, ny]))/np.log(2)))))
Ly = int(Lx)
else:
# no zero padding
Lx = int(nx)
Ly = int(ny)
if window:
# window the matrix with an elliptical Hann (raised cosine) window
M, Wss = Hann2D(M)
else:
# do not window (really, a square window with a value of 1)
Wss = np.sum(np.sum(np.ones((ny, nx))));
# calculate the frequency increments: frequency goes from zero (DC) to
# 1/(2*dx) (Nyquist in x direction) in Lx/2 increments; analogous for y.
dfx = 1/float(dx*Lx)
dfy = 1/float(dy*Ly)
M = np.rot90(np.fft.fftshift(np.fft.fft2(M,(Ly,Lx))))
M = np.real(M * np.conj(M)) / (Lx * Ly * Wss)
M[Ly/2+1,Lx/2+1]=0
# assign the power spectrum to the output argument
Pm = M
# Create a matrix of radial frequencies
xc = Lx/2
yc = Ly/2
cols, rows = np.meshgrid(range(Lx),range(Ly))
fm = np.sqrt(pow2(dfy*(rows-yc)) + pow2(dfx*(cols-xc))) # frequency matrix
# Create sorted, non-redundant vectors of frequency and power
M = M[:,range(Lx/2+1)]
fv = fm[:,range(Lx/2+1)]
fv[yc:Ly,xc-1] = -1
# Sort frequency vector and powerspec vector
fv = fv.flatten(1)
fvIdx = fv.argsort() # Get sorted index
Pv = Pm.flatten(1)
fv = fv[fvIdx]
Pv = Pv[fvIdx]
# Remove negative frequencies
Pv = Pv[fv>0]
fv = fv[fv>0]
# Separate into power and frequency vectors and assign to output arguments
Pv = 2*Pv
return [Pm,fm,Pv,fv]
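# Example (illustrative only): padded, Hann-windowed spectrum of a 10 m DEM.
#   Pm, fm, Pv, fv = powerspectrum(Z, 10.0, pad=1, window=1)
#   plt.loglog(fv, Pv, 'k.')            # radially sorted 1D spectrum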
| gpl-2.0 | -5,911,803,817,516,443,000 | 27.748157 | 108 | 0.575787 | false |
frederick623/pb | report_per_client/pb_report_distro.py | 1 | 6670 | import os
import sys
import re
import xlrd
import csv
import glob
import shutil
import comtypes.client
import requests
PATH_DICT = { "pb_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne",
"cln_filename": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\ClientDetails_????????.xlsx",
"distro_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_Report_Distribution",
"include_dir": ["\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Cash Projection",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Collateral_Statement",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Loan_Interest_Statement",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Interest Rebate Statement",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\MTM_Valuation_Report\\By Counterparty",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Risk Exposure report",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\SBL Borrow Report",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\CashEntry",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\TradeDetails",
"\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Hedging_Notice",
],
"replace_cpty": {"SPRUCE LIGHT ABS RET F FF-02031051590ED":"SPRUCE LIGHT ABSOL RET F-02031051590ED",
"CANAL EAST FUND FF-02031076330ED":"CANAL EAST FUND-02031076330ED",
"PRIME CHINA SERV LTD FF-02033979930ED":"PRIME CHINA SERV LTD-02033979930ED",
},
"exclude_files": ["Daily_IntStatement_"],
"daily_src_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data",
"daily_des_dir": "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_FA_daily",
}
def csv_to_arr(csv_file, start=0, has_header=True, delim=',', encoding='utf-8'):
arr = []
reader = []
if "http" in csv_file:
response = requests.get(csv_file)
		text = response.content.decode(encoding).splitlines()
else:
text = open(csv_file, 'rU')
reader = csv.reader(text, delimiter=delim)
arr = list(reader)
header = ""
if has_header:
header = ','.join(arr[start])
arr = arr[start+1:]
return re.sub(r"[\*\.#/\$%\"\(\)& \_-]", "", header), arr
else:
return arr[start:]
return
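# Example (illustrative only; the file name is hypothetical):
#   header, rows = csv_to_arr("trade_details.csv", delim=',')
#   print(header.split(','), len(rows))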
def xlsx_to_arr(xlsx_file, worksheet=0, row_start=0, col_start=0, row_end=-1, col_end=-1):
arr = []
wb = xlrd.open_workbook(xlsx_file)
ws = None
try:
ws = wb.sheet_by_index(worksheet)
except:
ws = wb.sheet_by_name(worksheet)
row_end = ws.nrows if row_end == -1 else row_end
col_end = ws.ncols if col_end == -1 else col_end
arr = [ws.row_values(row, start_colx=col_start, end_colx=col_end) for row in range(row_start, row_end)]
header = ','.join(x if x not in arr[0][:n] else x+str(n) for n, x in enumerate(arr[0]) )
return re.sub(r"[\*\.#/\$%\"\(\)& \_]", "", header), arr[1:]
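# Example (illustrative only; mirrors how client_list() below reads the client
# workbook -- the file name is hypothetical):
#   header, rows = xlsx_to_arr("ClientDetails_20180531.xlsx", worksheet=0, row_start=1)
#   client_ids = [r[0] for r in rows]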
def report_cpy(src_file, cpty):
des_str = src_file.replace(PATH_DICT["pb_dir"], PATH_DICT["distro_dir"])
orig_cpty = cpty
if cpty in list(PATH_DICT["replace_cpty"].keys()):
des_str = des_str.replace(cpty, PATH_DICT["replace_cpty"][cpty])
cpty = PATH_DICT["replace_cpty"][cpty]
des_arr = des_str.split('\\')
cpty_pos = des_arr.index(cpty)
report_pos = len(PATH_DICT["distro_dir"].split('\\'))
# Change to sort of counterparty at destination
des_arr[cpty_pos], des_arr[report_pos] = des_arr[report_pos], des_arr[cpty_pos]
# Remove excess hierarchy
for i in range(report_pos+1, len(des_arr)-1):
des_arr.pop(i)
des_folder = ('\\'.join(des_arr[:-1])).replace(" ", "_")
# Make dir if not existed
if not os.path.exists(des_folder):
os.makedirs(des_folder)
# Insert cpty name string for those without one
src_dir, src_filename = os.path.split(src_file)
des_filename = src_filename if orig_cpty in src_filename else src_filename[::-1].replace("_","_"+orig_cpty[::-1]+"_",1)[::-1]
shutil.copy2(src_file, os.path.join(des_folder, des_filename))
print (src_file + "->" + os.path.join(des_folder, des_filename))
return
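# For each counterparty, copy its newest report for the trade date from the given report folder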
def report_iter(tgt_dir, cpty_arr, dt_str):
for cpty in cpty_arr:
for root, dirs, files in os.walk(os.path.join(tgt_dir, cpty)):
# Match this trade date only
files = [f for f in files if (dt_str in f and not f[0] == '~' ) ]
if len(files) > 0:
# Find immediate folders containing reports
report_cpy(max([os.path.join(root, fl) for fl in files], key=os.path.getctime), cpty)
return
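# Walk the PB share for report folders that contain client subfolders and process each match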
def folder_match(tgt_dir, pattern, dt_str):
filepath_arr = []
for root, dirs, files in os.walk(tgt_dir):
if any(inc_dir in root for inc_dir in PATH_DICT["include_dir"]):
# Find path containing client report folder
cpty_arr = set(dirs).intersection(pattern)
if len(cpty_arr) > 0:
report_iter(root, cpty_arr, dt_str)
return
def cpy_daily_data(dt_str):
fa_files = glob.iglob(PATH_DICT["daily_src_dir"] + "\\[!~]*" + dt_str + ".xlsx")
for fa_file in fa_files:
shutil.copy2(fa_file, PATH_DICT["daily_des_dir"])
print (fa_file + ' -> ' + PATH_DICT["daily_des_dir"])
return
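# Load the latest client details workbook and return the client IDs together with the file date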
def client_list():
cln_file = max(glob.iglob(PATH_DICT["cln_filename"]))
# cln_file = "\\\\p7fs0003\\nd\\3033-Horizon-FA-Share\\PB_DeltaOne\\Daily_Data\\ClientDetails_20180531.xlsx"
print (cln_file)
cln_header, cln_arr = xlsx_to_arr(cln_file, row_start=1)
# Return client ID list
return [row[0] for row in cln_arr], cln_file[-13:-5]
def cln_report_folder():
for fl in glob.iglob(PATH_DICT["distro_dir"] + "\\*"):
shutil.rmtree(fl)
return
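# Convert any RTF reports under the distribution folder to PDF via the Word COM interface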
def search_rtf_replace():
tgt_dir = PATH_DICT["distro_dir"]
word = comtypes.client.CreateObject('Word.Application')
wdFormatPDF = 17
for root, dirs, files in os.walk(tgt_dir):
for in_file in files:
if ".rtf" in in_file.lower():
in_file = os.path.join(root, in_file)
doc = word.Documents.Open(in_file)
outfile = in_file.replace(".rtf", ".pdf")
doc.SaveAs(outfile, FileFormat=wdFormatPDF)
print (in_file + " -> " + outfile)
doc.Close()
os.remove(in_file)
word.Quit()
return
def remove_excl_files():
tgt_dir = PATH_DICT["distro_dir"]
for root, dirs, files in os.walk(tgt_dir):
for in_file in files:
for excl_file in PATH_DICT["exclude_files"]:
if excl_file in in_file:
in_file = os.path.join(root, in_file)
os.remove(in_file)
print ("Removed: " + in_file)
return
def main():
print ("PB Report Distribution")
cln_report_folder()
cln_arr, dt_str = client_list()
# Copy daily data to safe folder
cpy_daily_data(dt_str)
folder_match(PATH_DICT["pb_dir"], cln_arr, dt_str)
search_rtf_replace()
remove_excl_files()
return
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print ("Ctrl+C pressed. Stopping...") | apache-2.0 | -3,772,540,698,761,566,000 | 33.035714 | 126 | 0.651274 | false |
NetApp/manila | manila_tempest_tests/tests/api/admin/test_share_manage.py | 1 | 9833 | # Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
import testtools
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
class ManageNFSShareTest(base.BaseSharesAdminTest):
protocol = 'nfs'
# NOTE(vponomaryov): be careful running these tests using generic driver
# because cinder volumes will stay attached to service Nova VM and
# won't be deleted.
@classmethod
@testtools.skipIf(
CONF.share.multitenancy_enabled,
"Only for driver_handles_share_servers = False driver mode.")
@testtools.skipUnless(
CONF.share.run_manage_unmanage_tests,
"Manage/unmanage tests are disabled.")
def resource_setup(cls):
super(ManageNFSShareTest, cls).resource_setup()
if cls.protocol not in CONF.share.enable_protocols:
message = "%s tests are disabled" % cls.protocol
raise cls.skipException(message)
# Create share types
cls.st_name = data_utils.rand_name("manage-st-name")
cls.st_name_invalid = data_utils.rand_name("manage-st-name-invalid")
cls.extra_specs = {
'storage_protocol': CONF.share.capability_storage_protocol,
'driver_handles_share_servers': False,
'snapshot_support': six.text_type(
CONF.share.capability_snapshot_support),
}
cls.extra_specs_invalid = {
'storage_protocol': CONF.share.capability_storage_protocol,
'driver_handles_share_servers': True,
'snapshot_support': six.text_type(
CONF.share.capability_snapshot_support),
}
cls.st = cls.create_share_type(
name=cls.st_name,
cleanup_in_class=True,
extra_specs=cls.extra_specs)
cls.st_invalid = cls.create_share_type(
name=cls.st_name_invalid,
cleanup_in_class=True,
extra_specs=cls.extra_specs_invalid)
creation_data = {'kwargs': {
'share_type_id': cls.st['share_type']['id'],
'share_protocol': cls.protocol,
}}
# Data for creating shares in parallel
data = [creation_data, creation_data]
if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.5"):
data.append(creation_data)
if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.8"):
data.append(creation_data)
if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.16"):
data.append(creation_data)
shares_created = cls.create_shares(data)
cls.shares = []
# Load all share data (host, etc.)
for share in shares_created:
# Unmanage shares from manila
get_share = cls.shares_v2_client.get_share(share['id'])
if utils.is_microversion_ge(
CONF.share.max_api_microversion, "2.9"):
get_share["export_locations"] = (
cls.shares_v2_client.list_share_export_locations(
share["id"])
)
cls.shares.append(get_share)
cls.shares_client.unmanage_share(share['id'])
cls.shares_client.wait_for_resource_deletion(
share_id=share['id'])
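    # Helper: manage a previously unmanaged share, verify its attributes, then delete it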
def _test_manage(self, share, is_public=False,
version=CONF.share.max_api_microversion):
name = "Name for 'managed' share that had ID %s" % share['id']
description = "Description for 'managed' share"
# Manage share
managed_share = self.shares_v2_client.manage_share(
service_host=share['host'],
export_path=share['export_locations'][0],
protocol=share['share_proto'],
share_type_id=self.st['share_type']['id'],
name=name,
description=description,
is_public=is_public,
version=version,
)
# Add managed share to cleanup queue
self.method_resources.insert(
0, {'type': 'share', 'id': managed_share['id'],
'client': self.shares_client})
# Wait for success
self.shares_v2_client.wait_for_share_status(managed_share['id'],
'available')
# Verify data of managed share
self.assertEqual(name, managed_share['name'])
self.assertEqual(description, managed_share['description'])
self.assertEqual(share['host'], managed_share['host'])
self.assertEqual(share['share_proto'], managed_share['share_proto'])
if utils.is_microversion_ge(version, "2.6"):
self.assertEqual(self.st['share_type']['id'],
managed_share['share_type'])
else:
self.assertEqual(self.st['share_type']['name'],
managed_share['share_type'])
if utils.is_microversion_ge(version, "2.8"):
self.assertEqual(is_public, managed_share['is_public'])
else:
self.assertFalse(managed_share['is_public'])
if utils.is_microversion_ge(version, "2.16"):
self.assertEqual(share['user_id'], managed_share['user_id'])
else:
self.assertNotIn('user_id', managed_share)
# Delete share
self.shares_v2_client.delete_share(managed_share['id'])
self.shares_v2_client.wait_for_resource_deletion(
share_id=managed_share['id'])
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.get_share,
managed_share['id'])
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_not_supported("2.5")
def test_manage_with_os_share_manage_url(self):
self._test_manage(share=self.shares[2], version="2.5")
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_not_supported("2.8")
def test_manage_with_is_public_True(self):
self._test_manage(share=self.shares[3], is_public=True, version="2.8")
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_not_supported("2.16")
def test_manage_show_user_id(self):
self._test_manage(share=self.shares[4], version="2.16")
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
def test_manage(self):
# After 'unmanage' operation, share instance should be deleted.
# Assert not related to 'manage' test, but placed here for
# resource optimization.
share_instance_list = self.shares_v2_client.list_share_instances()
share_ids = [si['share_id'] for si in share_instance_list]
self.assertNotIn(self.shares[0]['id'], share_ids)
self._test_manage(share=self.shares[0])
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_manage_invalid(self):
# Try to manage share with invalid parameters, it should not succeed
# because the scheduler will reject it. If it succeeds, then this test
# case failed. Then, in order to remove the resource from backend, we
# need to manage it again, properly, so we can delete it. Consequently
# the second part of this test also tests that manage operation with a
# proper share type works.
def _delete_share(share_id):
self.shares_v2_client.reset_state(share_id)
self.shares_v2_client.delete_share(share_id)
self.shares_v2_client.wait_for_resource_deletion(share_id=share_id)
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.get_share,
share_id)
share = self.shares_v2_client.manage_share(
service_host=self.shares[1]['host'],
export_path=self.shares[1]['export_locations'][0],
protocol=self.shares[1]['share_proto'],
share_type_id=self.st_invalid['share_type']['id'])
self.addCleanup(_delete_share, share['id'])
self.shares_v2_client.wait_for_share_status(
share['id'], 'manage_error')
share = self.shares_v2_client.get_share(share['id'])
self.assertEqual(1, int(share['size']))
# Delete resource from backend. We need to manage the share properly
# so it can be removed.
share = self.shares_v2_client.manage_share(
service_host=self.shares[1]['host'],
export_path=self.shares[1]['export_locations'][0],
protocol=self.shares[1]['share_proto'],
share_type_id=self.st['share_type']['id'])
self.addCleanup(_delete_share, share['id'])
self.shares_v2_client.wait_for_share_status(
share['id'], 'available')
class ManageCIFSShareTest(ManageNFSShareTest):
protocol = 'cifs'
class ManageGLUSTERFSShareTest(ManageNFSShareTest):
protocol = 'glusterfs'
class ManageHDFSShareTest(ManageNFSShareTest):
protocol = 'hdfs'
class ManageCephFSShareTest(ManageNFSShareTest):
protocol = 'cephfs'
| apache-2.0 | 1,042,462,149,853,028,900 | 39.29918 | 79 | 0.615784 | false |
benrudolph/commcare-hq | custom/ilsgateway/tanzania/warehouse_updater.py | 1 | 29503 | from datetime import datetime, timedelta
import logging
import itertools
from celery.canvas import chain
from celery.task import task
from django.db import transaction
from django.db.models import Q
from corehq.apps.products.models import SQLProduct
from corehq.apps.locations.models import Location, SQLLocation
from dimagi.utils.chunked import chunked
from dimagi.utils.dates import get_business_day_of_month, add_months, months_between
from casexml.apps.stock.models import StockReport, StockTransaction
from custom.ilsgateway.models import SupplyPointStatus, SupplyPointStatusTypes, DeliveryGroups, \
OrganizationSummary, GroupSummary, SupplyPointStatusValues, Alert, ProductAvailabilityData, \
SupplyPointWarehouseRecord, HistoricalLocationGroup, ILSGatewayConfig
"""
These functions and variables are ported from:
https://github.com/dimagi/logistics/blob/tz-master/logistics_project/apps/tanzania/reporting/run_reports.py
"""
NEEDED_STATUS_TYPES = [SupplyPointStatusTypes.DELIVERY_FACILITY,
SupplyPointStatusTypes.R_AND_R_FACILITY,
SupplyPointStatusTypes.SUPERVISION_FACILITY,
SupplyPointStatusTypes.SOH_FACILITY]
NO_PRIMARY_CONTACT = 'no_primary_contact'
PRODUCT_STOCKOUT = 'product_stockout'
RR_NOT_SUBMITTED = 'rr_' + SupplyPointStatusValues.NOT_SUBMITTED
RR_NOT_RESPONDED = 'rr_not_responded'
DELIVERY_NOT_RECEIVED = 'delivery_' + SupplyPointStatusValues.NOT_RECEIVED
DELIVERY_NOT_RESPONDING = 'delivery_not_responding'
SOH_NOT_RESPONDING = 'soh_not_responding'
TEST_REGION_ID = 21
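# Decide whether a facility is expected to report the given status type for this month, based on its delivery group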
def _is_valid_status(facility, date, status_type):
if status_type not in NEEDED_STATUS_TYPES:
return False
groups = HistoricalLocationGroup.objects.filter(
date__month=date.month,
date__year=date.year,
location_id=facility.sql_location
)
if (not facility.metadata.get('group', None)) and (groups.count() == 0):
return False
if status_type == SupplyPointStatusTypes.SUPERVISION_FACILITY:
return True
if groups.count() > 0:
codes = [group.group for group in groups]
else:
try:
latest_group = HistoricalLocationGroup.objects.filter(
location_id=facility.sql_location
).latest('date')
if date.date() < latest_group.date:
return False
else:
codes = [facility.metadata['group']]
except HistoricalLocationGroup.DoesNotExist:
codes = [facility.metadata['group']]
dg = DeliveryGroups(date.month)
if status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
return dg.current_submitting_group() in codes
elif status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
return dg.current_delivering_group() in codes
return True
def _get_window_date(status_type, date):
# we need this method because the soh and super reports actually
# are sometimes treated as reports for _next_ month
if status_type == SupplyPointStatusTypes.SOH_FACILITY or \
status_type == SupplyPointStatusTypes.SUPERVISION_FACILITY:
# if the date is after the last business day of the month
# count it for the next month
if date.date() >= get_business_day_of_month(date.year, date.month, -1):
year, month = add_months(date.year, date.month, 1)
return datetime(year, month, 1)
return datetime(date.year, date.month, 1)
def is_on_time(status_date, warehouse_date, status_type):
"""
on_time requirement
SOH report should be submitted before 6th business day of the month.
R & R report should be submitted before 13th business day of the month.
Otherwise reports are marked as late response.
"""
if status_type == SupplyPointStatusTypes.SOH_FACILITY:
if status_date.date() < get_business_day_of_month(warehouse_date.year, warehouse_date.month, 6):
return True
if status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
if status_date.date() < get_business_day_of_month(warehouse_date.year, warehouse_date.month, 13):
return True
return False
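# Average gap between an R&R submission and the following delivery receipt for a facility, up to the end of the window month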
def average_lead_time(facility_id, window_date):
    # first day of the following month (add_months handles the December/year rollover)
    year, month = add_months(window_date.year, window_date.month, 1)
    end_date = datetime(year, month, 1)
received = SupplyPointStatus.objects.filter(
supply_point=facility_id,
status_date__lt=end_date,
status_value=SupplyPointStatusValues.RECEIVED,
status_type=SupplyPointStatusTypes.DELIVERY_FACILITY).order_by('status_date')
total_time = timedelta(days=0)
count = 0
last_receipt = datetime(1900, 1, 1)
for receipt in received:
if receipt.status_date - last_receipt < timedelta(days=30):
last_receipt = receipt.status_date
continue
last_receipt = receipt.status_date
last_submitted = SupplyPointStatus.objects.filter(
supply_point=facility_id,
status_date__lt=receipt.status_date,
status_value=SupplyPointStatusValues.SUBMITTED,
status_type=SupplyPointStatusTypes.R_AND_R_FACILITY).order_by('-status_date')
if last_submitted.count():
ltime = receipt.status_date - last_submitted[0].status_date
if timedelta(days=30) < ltime < timedelta(days=100):
total_time += ltime
count += 1
else:
continue
return total_time / count if count else None
def needed_status_types(org_summary):
facility = Location.get(docid=org_summary.supply_point)
return [status_type for status_type in NEEDED_STATUS_TYPES if _is_valid_status(facility,
org_summary.date, status_type)]
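# Fill in the expected GroupSummary rows for a facility that has not responded and raise the matching non-response alerts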
def not_responding_facility(org_summary):
for status_type in needed_status_types(org_summary):
group_summary, created = GroupSummary.objects.get_or_create(org_summary=org_summary,
title=status_type)
group_summary.total = 1
assert group_summary.responded in (0, 1)
if group_summary.title == SupplyPointStatusTypes.SOH_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.supply_point, org_summary.date,
'soh_not_responding', {'number': 1})
elif group_summary.title == SupplyPointStatusTypes.R_AND_R_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.supply_point, org_summary.date,
'rr_not_responded', {'number': 1})
elif group_summary.title == SupplyPointStatusTypes.DELIVERY_FACILITY and not group_summary.responded:
# TODO: this might not be right unless we also clear it
create_alert(org_summary.supply_point, org_summary.date,
'delivery_not_responding', {'number': 1})
else:
# not an expected / needed group. ignore for now
pass
group_summary.save()
@transaction.commit_on_success
def update_product_availability_facility_data(org_summary):
# product availability
facility = Location.get(docid=org_summary.supply_point)
assert facility.location_type == "FACILITY"
prods = SQLProduct.objects.filter(domain=facility.domain, is_archived=False)
for p in prods:
product_data, created = ProductAvailabilityData.objects.get_or_create(
product=p.product_id,
supply_point=facility._id,
date=org_summary.date
)
if created:
# set defaults
product_data.total = 1
previous_reports = ProductAvailabilityData.objects.filter(
product=p.product_id,
supply_point=facility._id,
date__lt=org_summary.date
)
if previous_reports.count():
prev = previous_reports.latest('date')
product_data.with_stock = prev.with_stock
product_data.without_stock = prev.without_stock
product_data.without_data = prev.without_data
else:
# otherwise we use the defaults
product_data.with_stock = 0
product_data.without_stock = 0
product_data.without_data = 1
product_data.save()
assert (product_data.with_stock + product_data.without_stock + product_data.without_data) == 1, \
"bad product data config"
def populate_no_primary_alerts(location, date):
# First of all we have to delete all existing alert for this date.
alert = Alert.objects.filter(supply_point=location._id, date=date, type=NO_PRIMARY_CONTACT)
alert.delete()
# create no primary contact alerts
# TODO Too slow. Figure out better solution.
"""
if not filter(lambda user: user.is_active and user.location and user.location._id == org._id,
CommCareUser.by_domain(org.domain)):
create_multilevel_alert(org, date, NO_PRIMARY_CONTACT, {'org': org})
"""
def populate_facility_stockout_alerts(facility_id, date):
# delete stockout alerts
alert = Alert.objects.filter(supply_point=facility_id, date=date, type=PRODUCT_STOCKOUT)
alert.delete()
# create stockout alerts
product_data = ProductAvailabilityData.objects.filter(supply_point=facility_id, date=date, without_stock=1)
for p in product_data:
create_multilevel_alert(facility_id, date, PRODUCT_STOCKOUT, {'org': facility_id, 'product': p.product})
def create_multilevel_alert(location, date, alert_type, details):
create_alert(location._id, date, alert_type, details)
if location.parent is not None:
create_multilevel_alert(location.parent, date, alert_type, details)
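# Create or refresh an alert of the given type for a location and month; alerts expire at the start of the next month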
def create_alert(location_id, date, alert_type, details):
text = ''
# url = ''
date = datetime(date.year, date.month, 1)
expyear, expmonth = add_months(date.year, date.month, 1)
expires = datetime(expyear, expmonth, 1)
number = 0 if 'number' not in details else details['number']
if alert_type in [PRODUCT_STOCKOUT, NO_PRIMARY_CONTACT]:
if alert_type == PRODUCT_STOCKOUT:
text = '%s is stocked out of %s.' % (details['org'].name, details['product'].name)
elif alert_type == NO_PRIMARY_CONTACT:
text = '%s has no primary contact.' % details['org'].name
alert = Alert.objects.filter(supply_point=location_id, date=date, type=alert_type, text=text)
if not alert:
Alert(supply_point=location_id, date=date, type=alert_type, expires=expires, text=text).save()
else:
if alert_type == RR_NOT_SUBMITTED:
text = '%s have reported not submitting their R&R form as of today.' % \
((str(number) + ' facility') if number == 1 else (str(number) + ' facilities'))
elif alert_type == RR_NOT_RESPONDED:
text = '%s did not respond to the SMS asking if they had submitted their R&R form.' % \
((str(number) + ' facility') if number == 1 else (str(number) + ' facilities'))
elif alert_type == DELIVERY_NOT_RECEIVED:
text = '%s have reported not receiving their deliveries as of today.' % \
((str(number) + ' facility') if number == 1 else (str(number) + ' facilities'))
elif alert_type == DELIVERY_NOT_RESPONDING:
text = '%s did not respond to the SMS asking if they had received their delivery.' % \
((str(number) + ' facility') if number == 1 else (str(number) + ' facilities'))
elif alert_type == SOH_NOT_RESPONDING:
text = '%s have not reported their stock levels for last month.' % \
((str(number) + ' facility') if number == 1 else (str(number) + ' facilities'))
alert, created = Alert.objects.get_or_create(
supply_point=location_id,
date=date,
type=alert_type,
expires=expires
)
alert.number = number
alert.text = text
alert.save()
def default_start_date():
return datetime(2012, 1, 1)
def _get_test_locations(domain):
"""
returns test region and all its children
"""
test_region = SQLLocation.objects.get(domain=domain, external_id=TEST_REGION_ID)
sql_locations = SQLLocation.objects.filter(
Q(domain=domain) & (Q(parent=test_region) | Q(parent__parent=test_region))
).order_by('id').only('location_id')
return [sql_location.couch_location for sql_location in sql_locations] + \
[test_region.couch_location]
def populate_report_data(start_date, end_date, domain, runner):
# first populate all the warehouse tables for all facilities
# hard coded to know this is the first date with data
start_date = max(start_date, default_start_date())
# For QA purposes generate reporting data for only some small part of data.
if not ILSGatewayConfig.for_domain(domain).all_stock_data:
locations = _get_test_locations(domain)
facilities = filter(lambda location: location.location_type == 'FACILITY', locations)
non_facilities_types = ['DISTRICT', 'REGION', 'MOHSW']
non_facilities = []
for location_type in non_facilities_types:
non_facilities.extend(filter(lambda location: location.location_type == location_type, locations))
else:
facilities = Location.filter_by_type(domain, 'FACILITY')
non_facilities = list(Location.filter_by_type(domain, 'DISTRICT'))
non_facilities += list(Location.filter_by_type(domain, 'REGION'))
non_facilities += list(Location.filter_by_type(domain, 'MOHSW'))
if runner.location:
if runner.location.location_type.name.upper() != 'FACILITY':
facilities = []
non_facilities = itertools.dropwhile(
lambda location: location._id != runner.location.location_id,
non_facilities
)
else:
facilities = itertools.dropwhile(
lambda location: location._id != runner.location.location_id,
facilities
)
facilities_chunked_list = chunked(facilities, 50)
for chunk in facilities_chunked_list:
res = chain(process_facility_warehouse_data.si(fac, start_date, end_date, runner) for fac in chunk)()
res.get()
non_facilities_chunked_list = chunked(non_facilities, 50)
# then populate everything above a facility off a warehouse table
for chunk in non_facilities_chunked_list:
res = chain(process_non_facility_warehouse_data.si(org, start_date, end_date, runner) for org in chunk)()
res.get()
runner.location = None
runner.save()
# finally go back through the history and initialize empty data for any
# newly created facilities
update_historical_data(domain)
@task(queue='background_queue', ignore_result=True)
def process_facility_warehouse_data(facility, start_date, end_date, runner):
"""
process all the facility-level warehouse tables
"""
logging.info("processing facility %s (%s)" % (facility.name, str(facility._id)))
runner.location = facility.sql_location
runner.save()
for alert_type in [SOH_NOT_RESPONDING, RR_NOT_RESPONDED, DELIVERY_NOT_RESPONDING]:
alert = Alert.objects.filter(supply_point=facility._id, date__gte=start_date, date__lt=end_date,
type=alert_type)
alert.delete()
supply_point_id = facility.linked_supply_point()._id
location_id = facility._id
new_statuses = SupplyPointStatus.objects.filter(
supply_point=facility._id,
status_date__gte=start_date,
status_date__lt=end_date
).order_by('status_date')
process_facility_statuses(location_id, new_statuses)
new_reports = StockReport.objects.filter(
stocktransaction__case_id=supply_point_id,
date__gte=start_date,
date__lt=end_date,
stocktransaction__type='stockonhand'
).order_by('date')
process_facility_product_reports(location_id, new_reports)
new_trans = StockTransaction.objects.filter(
case_id=supply_point_id,
report__date__gte=start_date,
report__date__lt=end_date,
).exclude(type='consumption').order_by('report__date')
process_facility_transactions(location_id, new_trans)
# go through all the possible values in the date ranges
# and make sure there are warehouse tables there
for year, month in months_between(start_date, end_date):
window_date = datetime(year, month, 1)
# create org_summary for every fac/date combo
org_summary, created = OrganizationSummary.objects.get_or_create(
supply_point=facility._id,
date=window_date
)
org_summary.total_orgs = 1
alt = average_lead_time(facility._id, window_date)
if alt:
alt = alt.days
org_summary.average_lead_time_in_days = alt or 0
org_summary.save()
# create group_summary for every org_summary title combo
for title in NEEDED_STATUS_TYPES:
GroupSummary.objects.get_or_create(org_summary=org_summary,
title=title)
# update all the non-response data
not_responding_facility(org_summary)
# update product availability data
update_product_availability_facility_data(org_summary)
# alerts
populate_no_primary_alerts(facility, window_date)
populate_facility_stockout_alerts(facility, window_date)
@transaction.commit_on_success
def process_facility_statuses(facility_id, statuses, alerts=True):
"""
For a given facility and list of statuses, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities.
"""
facility = Location.get(docid=facility_id)
for status in statuses:
warehouse_date = _get_window_date(status.status_type, status.status_date)
if _is_valid_status(facility, status.status_date, status.status_type):
org_summary = OrganizationSummary.objects.get_or_create(
supply_point=facility_id,
date=warehouse_date
)[0]
group_summary = GroupSummary.objects.get_or_create(
org_summary=org_summary,
title=status.status_type
)[0]
group_summary.total = 1
if status.status_value not in (SupplyPointStatusValues.REMINDER_SENT,
SupplyPointStatusValues.ALERT_SENT):
# we've responded to this query
group_summary.responded = 1
if status.status_value in [SupplyPointStatusValues.SUBMITTED,
SupplyPointStatusValues.RECEIVED]:
group_summary.complete = 1
else:
group_summary.complete = group_summary.complete or 0
if group_summary.complete:
if is_on_time(status.status_date, warehouse_date, status.status_type):
group_summary.on_time = 1
else:
group_summary.on_time = group_summary.on_time
else:
group_summary.on_time = 0
group_summary.save()
if alerts:
if status.status_value == SupplyPointStatusValues.NOT_SUBMITTED \
and status.status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
create_alert(facility_id, status.status_date, RR_NOT_SUBMITTED,
{'number': 1})
if status.status_value == SupplyPointStatusValues.NOT_RECEIVED \
and status.status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
create_alert(facility_id, status.status_date, DELIVERY_NOT_RECEIVED,
{'number': 1})
def process_facility_product_reports(facility_id, reports):
"""
For a given facility and list of ProductReports, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities. Currently this only affects stock on hand reporting
data. We need to use this method instead of the statuses because partial
stock on hand reports don't create valid status, but should be treated
like valid submissions in most of the rest of the site.
"""
months_updated = {}
for report in reports:
stock_transactions = report.stocktransaction_set.filter(type='stockonhand')
assert stock_transactions.count() > 0
warehouse_date = _get_window_date(SupplyPointStatusTypes.SOH_FACILITY, report.date)
if warehouse_date in months_updated:
# an optimization to avoid repeatedly doing this work for each
# product report for the entire month
continue
org_summary = OrganizationSummary.objects.get_or_create(supply_point=facility_id, date=warehouse_date)[0]
group_summary = GroupSummary.objects.get_or_create(org_summary=org_summary,
title=SupplyPointStatusTypes.SOH_FACILITY)[0]
group_summary.total = 1
group_summary.responded = 1
group_summary.complete = 1
if is_on_time(report.date, warehouse_date, SupplyPointStatusTypes.SOH_FACILITY):
group_summary.on_time = 1
group_summary.save()
months_updated[warehouse_date] = None # update the cache of stuff we've dealt with
@transaction.commit_on_success
def process_facility_transactions(facility_id, transactions):
"""
For a given facility and list of transactions, update the appropriate
data warehouse tables. This should only be called on supply points
that are facilities.
"""
for trans in transactions:
date = trans.report.date
product_data = ProductAvailabilityData.objects.get_or_create(
product=trans.product_id,
supply_point=facility_id,
date=datetime(date.year, date.month, 1)
)[0]
product_data.total = 1
product_data.without_data = 0
if trans.stock_on_hand <= 0:
product_data.without_stock = 1
product_data.with_stock = 0
else:
product_data.without_stock = 0
product_data.with_stock = 1
product_data.save()
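# Recursively collect the leaf locations beneath (or equal to) the given location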
def get_nested_children(location):
children = []
if not location.children:
return [location]
for child in location.children:
children.extend(get_nested_children(child))
return children
@task(queue='background_queue', ignore_result=True)
def process_non_facility_warehouse_data(location, start_date, end_date, runner, strict=True):
runner.location = location.sql_location
runner.save()
facs = get_nested_children(location)
fac_ids = [f._id for f in facs]
logging.info("processing non-facility %s (%s), %s children" % (location.name, str(location._id), len(facs)))
for year, month in months_between(start_date, end_date):
window_date = datetime(year, month, 1)
org_summary = OrganizationSummary.objects.get_or_create(supply_point=location._id, date=window_date)[0]
org_summary.total_orgs = len(facs)
sub_summaries = OrganizationSummary.objects.filter(date=window_date, supply_point__in=fac_ids)
subs_with_lead_time = [s for s in sub_summaries if s.average_lead_time_in_days]
# lead times
if subs_with_lead_time:
days_sum = sum([s.average_lead_time_in_days for s in subs_with_lead_time])
org_summary.average_lead_time_in_days = days_sum / len(subs_with_lead_time)
else:
org_summary.average_lead_time_in_days = 0
org_summary.save()
# product availability
prods = SQLProduct.objects.filter(domain=location.domain, is_archived=False)
for p in prods:
product_data = ProductAvailabilityData.objects.get_or_create(product=p.product_id,
supply_point=location._id,
date=window_date)[0]
sub_prods = ProductAvailabilityData.objects.filter(product=p.product_id,
supply_point__in=fac_ids,
date=window_date)
product_data.total = sum([p.total for p in sub_prods])
if strict:
assert product_data.total == len(facs), \
"total should match number of sub facilities"
product_data.with_stock = sum([p.with_stock for p in sub_prods])
product_data.without_stock = sum([p.without_stock for p in sub_prods])
product_data.without_data = product_data.total - product_data.with_stock - product_data.without_stock
product_data.save()
dg = DeliveryGroups(month=month, facs=facs)
for status_type in NEEDED_STATUS_TYPES:
gsum = GroupSummary.objects.get_or_create(org_summary=org_summary, title=status_type)[0]
sub_sums = GroupSummary.objects.filter(title=status_type, org_summary__in=sub_summaries).all()
# TODO: see if moving the aggregation to the db makes it
# faster, if this is slow
gsum.total = sum([s.total for s in sub_sums])
gsum.responded = sum([s.responded for s in sub_sums])
gsum.on_time = sum([s.on_time for s in sub_sums])
gsum.complete = sum([s.complete for s in sub_sums])
# gsum.missed_response = sum([s.missed_response for s in sub_sums])
gsum.save()
if status_type == SupplyPointStatusTypes.DELIVERY_FACILITY:
expected = len(dg.delivering())
elif status_type == SupplyPointStatusTypes.R_AND_R_FACILITY:
expected = len(dg.submitting())
elif status_type == SupplyPointStatusTypes.SOH_FACILITY \
or status_type == SupplyPointStatusTypes.SUPERVISION_FACILITY:
expected = len(facs)
if gsum.total != expected:
logging.info("expected %s but was %s for %s" % (expected, gsum.total, gsum))
for alert_type in [RR_NOT_SUBMITTED, DELIVERY_NOT_RECEIVED,
SOH_NOT_RESPONDING, RR_NOT_RESPONDED, DELIVERY_NOT_RESPONDING]:
sub_alerts = Alert.objects.filter(supply_point__in=fac_ids, date=window_date, type=alert_type)
aggregate_response_alerts(location._id, window_date, sub_alerts, alert_type)
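# Roll child-facility alerts of one type up into a single aggregate alert on the parent location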
def aggregate_response_alerts(location_id, date, alerts, alert_type):
total = sum([s.number for s in alerts])
if total > 0:
create_alert(location_id, date, alert_type, {'number': total})
def update_historical_data(domain):
"""
If we don't have a record of this supply point being updated, run
through all historical data and just fill in with zeros.
"""
org_summaries = OrganizationSummary.objects.order_by('date')
if org_summaries.count() == 0:
return
start_date = org_summaries[0].date
if not ILSGatewayConfig.for_domain(domain).all_stock_data:
locations = _get_test_locations(domain)
else:
locations = Location.by_domain(domain)
for sp in locations:
try:
SupplyPointWarehouseRecord.objects.get(supply_point=sp._id)
except SupplyPointWarehouseRecord.DoesNotExist:
# we didn't have a record so go through and historically update
# anything we maybe haven't touched
for year, month in months_between(start_date, sp.sql_location.created_at):
window_date = datetime(year, month, 1)
for cls in [OrganizationSummary, ProductAvailabilityData, GroupSummary]:
_init_warehouse_model(cls, sp, window_date)
SupplyPointWarehouseRecord.objects.create(supply_point=sp._id,
create_date=datetime.utcnow())
def _init_warehouse_model(cls, location, date):
if cls == OrganizationSummary:
_init_default(location, date)
elif cls == ProductAvailabilityData:
_init_with_product(location, date)
elif cls == GroupSummary:
_init_group_summary(location, date)
def _init_default(location, date):
OrganizationSummary.objects.get_or_create(supply_point=location._id, date=date)
def _init_with_product(location, date):
for p in SQLProduct.objects.filter(domain=location.domain, is_archived=False):
ProductAvailabilityData.objects.get_or_create(supply_point=location._id, date=date, product=p.product_id)
def _init_group_summary(location, date):
org_summary = OrganizationSummary.objects.get(supply_point=location._id, date=date)
for title in NEEDED_STATUS_TYPES:
GroupSummary.objects.get_or_create(org_summary=org_summary,
title=title)
| bsd-3-clause | 6,236,115,684,714,838,000 | 42.968703 | 114 | 0.633563 | false |
sdelaughter/SmartHome | v0.3/resources/door.py | 1 | 4268 | # Samuel DeLaughter
# 5/8/15
from SimpleXMLRPCServer import SimpleXMLRPCServer
from threading import Thread
import xmlrpclib
import logging
import socket
import random
import time
import iot
class door(iot.device):
#The class defining the door sensor object
#Can be set to open (1) or closed (0), or queried for its current state
def __init__(self):
iot.device.__init__(self)
self.name = 'door'
self.category = 'device'
self.state = 0
self.sensing_interval = 5
#Set up logging
iot.setup_log(self.name, time.localtime())
#Register with the gateway
self.register()
'''
#Initialize and start daemon thread for serving as the clock synchronization leader
leader_thread=Thread(target=self.lead, name='Door Leader Thread')
leader_thread.daemon = True
leader_thread.start()
'''
#Initialize and start daemon thread for the client to push state changes to the server
client_thread=Thread(target=self.start_client, name='Door Client Thread')
client_thread.daemon = True
client_thread.start()
#Start the door server
self.serve()
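	#Expose the device's XML-RPC interface and serve requests until interrupted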
def serve(self):
self.server = SimpleXMLRPCServer((self.ip, self.port), logRequests=False, allow_none=True)
self.server.register_function(self.ping)
self.server.register_function(self.serve)
self.server.register_function(self.register)
self.server.register_function(self.timestamp)
#self.server.register_function(self.start_election)
#self.server.register_function(self.lead)
#self.server.register_function(self.get_time)
#self.server.register_function(self.set_time)
self.server.register_function(self.get_attr)
self.server.register_function(self.set_attr)
self.server.register_function(self.db_get_state)
self.server.register_function(self.db_get_history)
#self.server.register_function(self.set_leader)
self.server.register_function(self.device_by_name)
self.server.register_function(self.devices_by_name)
self.server.register_function(self.update_device_list)
self.server.register_function(self.sense)
self.clock += 1
try:
print '\nStarting Server'
print 'Use Control-C to exit'
logging.info(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'Starting Server')
self.server.serve_forever()
except KeyboardInterrupt:
logging.info(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'Received keyboard interrupt, stopping server')
print 'Exiting'
def sense(self):
#Informs the gateway to change its door_state attribute whenever the state changes
#If open, it has a 75% chance of closing
#If closed, it has a 25% chance of opening
if self.state == 1:
p = 0.75
else:
p = 0.25
if(random.random() <= p):
s = int(not(self.state))
self.state = s
logging.info(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'State was set to ' + str(s))
print('State was set to ' + str(s))
self.clock += 1
self.db_put(self.name, self.state)
try:
#Set the gateway's door_state attribute
r = iot.gateway_connection()
c = r.handle_door_state(self.clock, s)
self.update_clock(c)
logging.info(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'Set gateway door state to: ' + str(s))
except:
logging.warning(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'Failed to contact gateway')
print('WARNING: Failed to contact gateway')
self.register()
def get_attr(self, c, attr):
#Called remotely to get the value of a given attribute
#If the state attribute is requested, run self.sense() to update it before returning it
#Otherwise, act identical to iot.device.get_attr()
self.update_clock(c)
if(attr == 'state'):
self.sense()
logging.debug(str(self.clock) + ' | ' + str(self.timestamp()) + ': ' + 'State was queried, returning ' + str(self.state))
print('State was queried, returning ' + str(self.state))
return self.clock, self.state
elif(hasattr(self, attr)):
v = getattr(self, attr)
return self.clock, v
else:
return self.clock, None
def start_client(self):
		#Start the client loop that periodically senses the door state and reports changes to the gateway
while True:
self.sense()
time.sleep(self.sensing_interval)
def main():
#Create a new instance of the door object
d=door()
if __name__ == '__main__':
main() | gpl-2.0 | 8,859,138,296,888,232,000 | 30.622222 | 124 | 0.692596 | false |
ictground/Chainforger | core/checker.py | 1 | 2415 | import re, requests, sys
from .gui import *
class Checker:
def __init__(self, timeout = 10, exportFileName = None):
self.timeout = timeout
self.exportFileName = exportFileName
self.filter = []
def showStatus(self):
if self.timeout != None:
showInfo("Proxy timeout: " + str(self.timeout) + "s")
if self.exportFileName != None:
showInfo("Exporting to: " + self.exportFileName)
else:
showWarning("No output file selected. Use '--export' or ignore this message")
print()
def setFilter(self, _filter):
self.filter = _filter.split(',')
def checkProxyList(self, fileToCheck):
showBanner()
showInfo("Checking your proxies...")
self.showStatus()
try:
file = open(fileToCheck, "r")
for line in file:
if re.match(r"http[s]*|socks\d", line):
data = self.parseArray(line.rstrip())
if data["protocol"] not in self.filter:
self.checkProxy(data["protocol"], data["ip"], data["port"])
except FileNotFoundError:
showError("File '" + fileToCheck + "' not found!")
def parseArray(self, line):
match = re.match(r"(.+?) (\d{1,4}\.\d{1,4}\.\d{1,4}\.\d{1,4}) (\d+)", line)
		if not match:
print("Error: Proxylist does not match proxychains format!")
sys.exit(1)
return {
"protocol": match.group(1),
"ip": match.group(2),
"port": match.group(3)
}
def checkProxy(self, protocol, ip, port):
if protocol == "http":
proxy = {"http": "http://" + ip}
if protocol == "https":
proxy = {"https" : "http://" + ip}
if protocol == "socks4":
proxy = {"https" : "socks4://" + ip, "http" : "socks4://" + ip}
if protocol == "socks5":
proxy = {"https" : "socks5://" + ip, "http" : "socks5://" + ip}
try:
r = requests.get("https://api.ipify.org/", proxies=proxy, timeout=self.timeout)
if r.status_code == 200:
print(getGreen("[OK]:"), protocol, ip, port)
if self.exportFileName != None:
self.writeToFile(self.exportFileName, protocol + " " + ip + " " + port)
return True
else:
print(getRed("[ERROR]:"), protocol, ip, port + "[Dead proxy]")
return False
except Exception as e:
print(getRed("[ERROR]:"), protocol, ip, port + "[Can't connect]")
return False
except KeyboardInterrupt:
showWarning("Stopping application...")
sys.exit(0)
def writeToFile(self, filename, text):
file = open(filename, "a")
file.write(text + "\n")
file.close()
| gpl-3.0 | -3,740,616,617,319,418,400 | 25.833333 | 82 | 0.61118 | false |
jalandra/cool_scripts | harness.py | 1 | 5015 | import subprocess
import sys
import os
import win32com.client
import time
outlook = None
nameAliasDict = {}
contacts = None
numEntries = None
file = None
mail_schema = "http://schemas.microsoft.com/mapi/proptag/0x800F101F"
alias_schema = "http://schemas.microsoft.com/mapi/proptag/0x3A00001F"
criticalCommit = False
svn_path ='https://svn.ali.global/gen7/mk7games/games/red_black_yellow/branches/'
temp_svn_path = None
lastRevision = None
#get the svn revision number for the critical fix, all svn logs are written here
def getLastCommitRevision():
svn_revision_file = os.path.expanduser('~/AppData/Local/TortoiseSVN/logfile.txt')
revision_num_generated = False
revision_list = []
for line in open(svn_revision_file):
if revision_num_generated:
if 'At revision' in line:
revision_num = line.split(':')[-1]
revision_list.append(revision_num.strip())
revision_num_generated = False
if 'Committing transaction' in line:
revision_num_generated = True
#print revision_list
return revision_list[-1]
#check if the last commit was critical or not
def isCriticalCommit():
global criticalCommit, svn_path, temp_svn_path, lastRevision
cmd = 'svn info > svn_log.txt'
p = subprocess.Popen(cmd , shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, stderr = p.communicate()
for line in open('svn_log.txt'):
if 'URL' in line:
svn_path = line.split(' ')[1].strip()
break
temp_svn_path = svn_path
svn_path = svn_path.split('branches')[0].strip()
svn_path = svn_path + 'branches'
lastRevision = getLastCommitRevision()
while True:
cmd = 'svn log '+svn_path+' -r '+lastRevision
p = subprocess.Popen(cmd , shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, stderr = p.communicate()
if 'No such revision' in output:
print "Repository not synched yet, retrying in next 10 seconds..."
time.sleep(10)
elif 'Critical Commit' in output:
criticalCommit = True
break
else:
criticalCommit = False
break
#get a substring between first and last
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start+1 )
return s[start:end]
except ValueError:
return ""
#send a mail
def send_mail(branchname,recipient , manager):
global outlook
subject = "[IMPORTANT]Critical Check-in Information"
body = "The branch " + branchname+" has got a critical issue checked in at revision: "+lastRevision+" , please have a look at the changes."
mail = outlook.CreateItem(0)
mail.To = recipient
if manager != None :
mail.CC = manager
mail.Subject = subject
mail.Body = body
print "Sending mail to the stakeholders"
mail.Send()
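#get the smtp address from an outlook contact entry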
def getMailId(contact) :
prop_str = contact.PropertyAccessor.GetProperty(mail_schema)
prop = str(prop_str).split(',')
mail = find_between(prop[0],'\'','\'').split(':')[-1]
return mail
#Get the global address list
def fillContactList():
global contacts, nameAliasDict, file
for i in contacts:
# name = i.Name
prop_str_alias = i.PropertyAccessor.GetProperty(alias_schema)
nameAliasDict[prop_str_alias.lower()] = i
file.write(prop_str_alias.encode('utf-8').strip() + "\n")
file.close()
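#initialise outlook and load the global address list contacts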
def init():
global outlook, contacts, numEntries, file
outlook = win32com.client.gencache.EnsureDispatch("Outlook.Application")
ns = outlook.GetNamespace("MAPI")
file = open('mapping.txt', 'w')
adrLi = ns.AddressLists.Item("Global Address List")
contacts = adrLi.AddressEntries
numEntries = adrLi.AddressEntries.Count
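#scan the branch commit log for authors and notify them and their managers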
def getSvnLog():
global temp_svn_path
cmd = 'svn log -q '+svn_path+'> svn_log.txt'
p = subprocess.Popen(cmd , shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, stderr = p.communicate()
authors = set()
for line in open('svn_log.txt'):
if 'r' in line:
authors.add(line.split('|')[1].strip())
author_mail = ""
manager_mail = ""
for author in sorted(authors):
if author.lower() in nameAliasDict:
author_mail = author_mail + ';' + getMailId(nameAliasDict[author.lower()])
if nameAliasDict[author.lower()].Manager != None:
manager_mail = manager_mail + ';' + getMailId(nameAliasDict[author.lower()].Manager)
send_mail(temp_svn_path,"[email protected]", "[email protected];[email protected]")
#send_mail(temp_svn_path,author_mail,manager_mail)
def removeFile():
os.remove ('svn_log.txt')
os.remove ('mapping.txt')
isCriticalCommit()
if criticalCommit == True:
print "critical commit detected \n"
init()
fillContactList()
getSvnLog()
removeFile()
| gpl-3.0 | -7,528,241,245,435,543,000 | 33.115646 | 143 | 0.642273 | false |
pupil-labs/hmd-eyes | python_reference_client/hmd_calibration_client.py | 1 | 3455 | """
HMD calibration client example.
This script shows how to talk to Pupil Capture or Pupil Service
and run a gaze mapper calibration.
"""
import zmq, msgpack, time
ctx = zmq.Context()
# create a zmq REQ socket to talk to Pupil Service/Capture
req = ctx.socket(zmq.REQ)
req.connect("tcp://127.0.0.1:50020")
# convenience functions
def send_recv_notification(n):
    # REQ REP requires lock-step communication with a multipart msg (topic, msgpack-encoded dict)
req.send_string("notify.%s" % n["subject"], flags=zmq.SNDMORE)
req.send(msgpack.dumps(n, use_bin_type=True))
return req.recv_string()
def get_pupil_timestamp():
req.send_string("t") # see Pupil Remote Plugin for details
return float(req.recv_string())
# set start eye windows
n = {"subject": "eye_process.should_start.0", "eye_id": 0, "args": {}}
print(send_recv_notification(n))
n = {"subject": "eye_process.should_start.1", "eye_id": 1, "args": {}}
print(send_recv_notification(n))
time.sleep(2)
# set calibration method to hmd calibration
n = {"subject": "start_plugin", "name": "HMD3DChoreographyPlugin", "args": {}}
print(send_recv_notification(n))
# start calibration routine with params. This will make pupil start sampling pupil data.
# the eye-translations have to be in mm, these here are default values from Unity XR
n = {
"subject": "calibration.should_start",
"translation_eye0": [34.75, 0.0, 0.0],
"translation_eye1": [-34.75, 0.0, 0.0],
"record": True,
}
print(send_recv_notification(n))
# Mockup logic for sample movement:
# We sample some reference positions in scene coordinates (mm) relative to the HMD.
# Positions can be freely defined
ref_data = []
for pos in (
(0.0, 0.0, 600.0),
(0.0, 0.0, 1000.0),
(0.0, 0.0, 2000.0),
(180.0, 0.0, 600.0),
(240.0, 0.0, 1000.0),
(420.0, 0.0, 2000.0),
(55.62306, 195.383, 600.0),
(74.16407, 260.5106, 1000.0),
(129.7871, 455.8936, 2000.0),
(-145.6231, 120.7533, 600.0),
(-194.1641, 161.0044, 1000.0),
(-339.7872, 281.7577, 2000.0),
(-145.6231, -120.7533, 600.0),
(-194.1641, -161.0044, 1000.0),
(-339.7872, -281.7577, 2000.0),
(55.62306, -195.383, 600.0),
(74.16407, -260.5106, 1000.0),
(129.7871, -455.8936, 2000.0),
):
print("subject now looks at position:", pos)
for s in range(60):
        # your screen animation instructions would go here
# get the current pupil time (pupil uses CLOCK_MONOTONIC with adjustable timebase).
# You can set the pupil timebase to another clock and use that.
t = get_pupil_timestamp()
# in this mockup the left and right screen marker positions are identical.
datum0 = {"mm_pos": pos, "timestamp": t}
datum1 = {"mm_pos": pos, "timestamp": t}
ref_data.append(datum0)
ref_data.append(datum1)
time.sleep(1 / 60.0) # simulate animation speed.
# Send ref data to Pupil Capture/Service:
# This notification can be sent once at the end or multiple times.
# During one calibration all new data will be appended.
n = {
"subject": "calibration.add_ref_data",
"ref_data": ref_data,
"record": True,
}
print(send_recv_notification(n))
# stop calibration
# Pupil will correlate pupil and ref data based on timestamps,
# compute the gaze mapping params, and start a new gaze mapper.
n = {
"subject": "calibration.should_stop",
"record": True,
}
print(send_recv_notification(n))
time.sleep(2)
| lgpl-3.0 | 921,651,937,440,219,000 | 29.575221 | 95 | 0.655861 | false |
default-to-open/rpmdeplint | acceptance_tests/test_check.py | 1 | 10891 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import glob
import time
import shutil
import rpmfluff
from data_setup import run_rpmdeplint
from rpmdeplint.repodata import cache_base_path
def expected_cache_path(repodir, suffix, old=False):
"""
For the test repo located in *repodir*, return the path within the
rpmdeplint cache where we expect the metadata file with given suffix
to appear after rpmdeplint has downloaded it.
"""
filename, = [filename for filename in os.listdir(os.path.join(repodir, 'repodata'))
if filename.endswith(suffix)]
checksum = filename.split('-', 1)[0]
if old:
return os.path.join(cache_base_path(), checksum[:1], checksum[1:], filename)
return os.path.join(cache_base_path(), checksum[:1], checksum[1:])
def test_finds_all_problems(request, dir_server):
p_newer = rpmfluff.SimpleRpmBuild('a', '5.0', '1', ['i386'])
p_with_content = rpmfluff.SimpleRpmBuild('b', '0.1', '1', ['i386'])
p_with_content.add_installed_file(installPath='usr/share/thing',
sourceFile=rpmfluff.SourceFile('thing', 'content\n'))
p_old_soname = rpmfluff.SimpleRpmBuild('c', '0.1', '1', ['i386'])
p_old_soname.add_provides('libfoo.so.4')
p_depending = rpmfluff.SimpleRpmBuild('d', '0.1', '1', ['i386'])
p_depending.add_requires('libfoo.so.4')
repo_packages = [p_newer, p_with_content, p_old_soname, p_depending]
baserepo = rpmfluff.YumRepoBuild(repo_packages)
baserepo.make('i386')
dir_server.basepath = baserepo.repoDir
p_older = rpmfluff.SimpleRpmBuild('a', '4.0', '1', ['i386'])
p_older.make()
p_broken = rpmfluff.SimpleRpmBuild('e', '1.0', '1', ['i386'])
p_broken.add_requires('doesnotexist')
p_broken.make()
p_with_different_content = rpmfluff.SimpleRpmBuild('f', '0.1', '1', ['i386'])
p_with_different_content.add_installed_file(installPath='usr/share/thing',
sourceFile=rpmfluff.SourceFile('thing', 'different content\n'))
p_with_different_content.make()
p_soname_changed = rpmfluff.SimpleRpmBuild('c', '0.2', '1', ['i386'])
p_soname_changed.add_provides('libfoo.so.5')
p_soname_changed.make()
test_packages = [p_older, p_broken, p_with_different_content, p_soname_changed]
def cleanUp():
shutil.rmtree(baserepo.repoDir)
for p in repo_packages + test_packages:
shutil.rmtree(p.get_base_dir())
request.addfinalizer(cleanUp)
exitcode, out, err = run_rpmdeplint(
['rpmdeplint', 'check', '--repo=base,{}'.format(dir_server.url)] +
[p.get_built_rpm('i386') for p in test_packages])
assert exitcode == 3
assert err == ('Problems with dependency set:\n'
'nothing provides doesnotexist needed by e-1.0-1.i386\n'
'Dependency problems with repos:\n'
'package d-0.1-1.i386 requires libfoo.so.4, but none of the providers can be installed\n'
'Undeclared file conflicts:\n'
'f-0.1-1.i386 provides /usr/share/thing which is also provided by b-0.1-1.i386\n'
'Upgrade problems:\n'
'a-4.0-1.i386 would be upgraded by a-5.0-1.i386 from repo base\n')
def test_guesses_arch_when_combined_with_noarch_package(request, dir_server):
# A more realistic case is an archful package with a noarch subpackage,
# but rpmfluff currently can't produce that.
p_noarch = rpmfluff.SimpleRpmBuild('a', '0.1', '1', ['noarch'])
p_noarch.add_requires('libfoo.so.4')
p_noarch.make()
p_archful = rpmfluff.SimpleRpmBuild('b', '0.1', '1', ['i386'])
p_archful.add_requires('libfoo.so.4')
p_archful.make()
baserepo = rpmfluff.YumRepoBuild([])
baserepo.make('i386')
dir_server.basepath = baserepo.repoDir
def cleanUp():
shutil.rmtree(baserepo.repoDir)
shutil.rmtree(p_noarch.get_base_dir())
shutil.rmtree(p_archful.get_base_dir())
request.addfinalizer(cleanUp)
exitcode, out, err = run_rpmdeplint([
'rpmdeplint', 'check', '--repo=base,{}'.format(dir_server.url),
p_noarch.get_built_rpm('noarch'), p_archful.get_built_rpm('i386')
])
assert exitcode == 3, err
assert err == ('Problems with dependency set:\n'
'nothing provides libfoo.so.4 needed by a-0.1-1.noarch\n'
'nothing provides libfoo.so.4 needed by b-0.1-1.i386\n')
def test_cache_is_used_when_available(request, dir_server):
p1 = rpmfluff.SimpleRpmBuild('a', '0.1', '1', ['i386'])
baserepo = rpmfluff.YumRepoBuild((p1,))
baserepo.make('i386')
dir_server.basepath = baserepo.repoDir
def cleanUp():
shutil.rmtree(baserepo.repoDir)
shutil.rmtree(p1.get_base_dir())
request.addfinalizer(cleanUp)
# Assuming cache is cleaned first
assert dir_server.num_requests == 0
run_rpmdeplint(['rpmdeplint', 'check', '--repo=base,{}'.format(
dir_server.url), p1.get_built_rpm('i386')])
cache_path = expected_cache_path(baserepo.repoDir, 'primary.xml.gz')
assert os.path.exists(cache_path)
original_cache_mtime = os.path.getmtime(cache_path)
# A single run of rpmdeplint with a clean cache should expect network
# requests for - repomd.xml, primary.xml.gz and filelists.xml.gz. Requiring
# a total of 3
assert dir_server.num_requests == 3
run_rpmdeplint(['rpmdeplint', 'check', '--repo=base,{}'.format(
dir_server.url), p1.get_built_rpm('i386')])
new_cache_mtime = os.path.getmtime(cache_path)
assert new_cache_mtime > original_cache_mtime
# Executing 2 subprocesses should expect 4 requests if repodata cache is
# functioning correctly. A single request for each file in the repo
# - repomd.xml, primary.xml.gz, filelists.xml.gz, with an additional
# request from the second process checking metadata. The additional
# single request shows that the files are skipped in the second process
assert dir_server.num_requests == 4
def test_cache_doesnt_grow_unboundedly(request, dir_server):
os.environ['RPMDEPLINT_EXPIRY_SECONDS'] = '1'
p1 = rpmfluff.SimpleRpmBuild('a', '0.1', '1', ['i386'])
firstrepo = rpmfluff.YumRepoBuild((p1, ))
firstrepo.make('i386')
dir_server.basepath = firstrepo.repoDir
def cleanup():
shutil.rmtree(firstrepo.repoDir)
shutil.rmtree(p1.get_base_dir())
request.addfinalizer(cleanup)
exitcode, out, err = run_rpmdeplint(['rpmdeplint', 'check',
'--repo=base,{}'.format(dir_server.url),
p1.get_built_rpm('i386')])
assert exitcode == 0
first_primary_cache_path = expected_cache_path(firstrepo.repoDir, 'primary.xml.gz')
first_filelists_cache_path = expected_cache_path(firstrepo.repoDir, 'filelists.xml.gz')
assert os.path.exists(first_primary_cache_path)
assert os.path.exists(first_filelists_cache_path)
p2 = rpmfluff.SimpleRpmBuild('b', '0.1', '1', ['i386'])
secondrepo = rpmfluff.YumRepoBuild((p2, ))
secondrepo.make('i386')
dir_server.basepath = secondrepo.repoDir
def cleanup2():
shutil.rmtree(secondrepo.repoDir)
shutil.rmtree(p2.get_base_dir())
request.addfinalizer(cleanup2)
# ensure time period of cache has expired
time.sleep(2)
exitcode, out, err = run_rpmdeplint(['rpmdeplint', 'check',
'--repo=base,{}'.format(dir_server.url),
p2.get_built_rpm('i386')])
assert exitcode == 0
second_primary_cache_path = expected_cache_path(secondrepo.repoDir, 'primary.xml.gz')
second_filelists_cache_path = expected_cache_path(secondrepo.repoDir, 'filelists.xml.gz')
# Ensure the cache only has files from the second one
assert not os.path.exists(first_primary_cache_path)
assert not os.path.exists(first_filelists_cache_path)
assert os.path.exists(second_primary_cache_path)
assert os.path.exists(second_filelists_cache_path)
def test_migrates_old_cache_layout(request, dir_server):
p1 = rpmfluff.SimpleRpmBuild('a', '0.1', '1', ['i386'])
repo = rpmfluff.YumRepoBuild([p1])
repo.make('i386')
dir_server.basepath = repo.repoDir
def cleanUp():
shutil.rmtree(repo.repoDir)
shutil.rmtree(p1.get_base_dir())
request.addfinalizer(cleanUp)
old_cache_path = expected_cache_path(repo.repoDir, 'primary.xml.gz', old=True)
new_cache_path = expected_cache_path(repo.repoDir, 'primary.xml.gz')
# Simulate the old cache path left over from an older version of rpmdeplint
os.makedirs(os.path.dirname(old_cache_path))
with open(old_cache_path, 'w') as f:
f.write('lol\n')
exitcode, out, err = run_rpmdeplint(['rpmdeplint', 'check',
'--repo=base,{}'.format(dir_server.url),
p1.get_built_rpm('i386')])
assert exitcode == 0
assert err == ''
assert not os.path.exists(old_cache_path)
assert os.path.isfile(new_cache_path)
def test_prints_error_on_repo_download_failure(request, dir_server):
# Specifically we don't want an unhandled exception, because that triggers abrt.
test_tool_rpm = rpmfluff.SimpleRpmBuild('test-tool', '10', '3.el6', ['x86_64'])
test_tool_rpm.make()
def cleanUp():
shutil.rmtree(test_tool_rpm.get_base_dir())
request.addfinalizer(cleanUp)
exitcode, out, err = run_rpmdeplint([
'rpmdeplint', 'check', '--repo=broken,http://notexist.example/',
test_tool_rpm.get_built_rpm('x86_64')
])
assert exitcode == 1
assert err.startswith('Failed to download repodata')
assert 'Traceback' not in err
def test_prints_error_on_repodata_file_download_failure(request, dir_server):
# Similar to the above, but in this case repomd.xml works but
# primary.xml.gz is broken. We test this case specifically, because the
# code paths for fetching repomd.xml and the other repodata files are
# separate.
p1 = rpmfluff.SimpleRpmBuild('test-tool', '10', '3.el6', ['x86_64'])
p1.add_requires('unsatisfied')
repo = rpmfluff.YumRepoBuild([p1])
repo.make('x86_64')
for repodata_filename in os.listdir(os.path.join(repo.repoDir, 'repodata')):
if 'primary' in repodata_filename:
os.unlink(os.path.join(repo.repoDir, 'repodata', repodata_filename))
dir_server.basepath = repo.repoDir
def cleanUp():
shutil.rmtree(repo.repoDir)
shutil.rmtree(p1.get_base_dir())
request.addfinalizer(cleanUp)
exitcode, out, err = run_rpmdeplint(['rpmdeplint', 'check',
'--repo=base,{}'.format(dir_server.url), p1.get_built_rpm('x86_64')])
assert exitcode == 1
assert err.startswith('Failed to download repodata')
assert '404' in err
assert 'Traceback' not in err
| gpl-2.0 | -6,491,165,283,014,915,000 | 39.040441 | 101 | 0.663208 | false |
SublimeText/PackageDev | plugins/command_completions/__init__.py | 1 | 9912 | from collections import OrderedDict
import logging
import json
import re
import itertools
import sublime
import sublime_plugin
from ..lib import inhibit_word_completions
from .commandinfo import (
get_command_name,
get_builtin_command_meta_data,
get_builtin_commands,
iter_python_command_classes,
get_args_from_command_name
)
__all__ = (
"SublimeTextCommandCompletionPythonListener",
"SublimeTextCommandArgsCompletionListener",
"SublimeTextCommandArgsCompletionPythonListener",
"SublimeTextCommandCompletionListener",
)
KIND_APPLICATION = (sublime.KIND_ID_FUNCTION, "A", "Application Command")
KIND_WINDOW = (sublime.KIND_ID_FUNCTION, "W", "Window Command")
KIND_TEXT = (sublime.KIND_ID_FUNCTION, "T", "Text Command")
KIND_MAP = {
'application': KIND_APPLICATION,
'window': KIND_WINDOW,
'text': KIND_TEXT,
}
KIND_COMMAND = (sublime.KIND_ID_FUNCTION, "C", "Command") # fallback
KIND_SNIPPET = sublime.KIND_SNIPPET
logger = logging.getLogger(__name__)
def _escape_in_snippet(v):
return v.replace("}", "\\}").replace("$", "\\$")
def is_plugin(view):
"""Use some heuristics to determine whether a Python view shows a plugin.
Or the console input widget, should it be using the Python syntax.
"""
return (view.find("import sublime", 0, sublime.LITERAL) is not None
or sublime.packages_path() in (view.file_name() or "")
or view.settings().get('is_widget'))
def create_args_snippet_from_command_args(command_args, quote_char='"', for_json=True):
"""Create an argument snippet to insert from the arguments to run a command.
Parameters:
command_args (dict)
The arguments with their default value.
quote_char (str)
Which char should be used for string quoting.
for_json (bool)
Whether it should be done for a json or a python file.
Returns (str)
The formatted entry to insert into the sublime text package
file.
"""
counter = itertools.count(1)
def make_snippet_item(k, v):
if v is not None:
if isinstance(v, str):
v = '{q}${{{i}:{v}}}{q}'.format(i=next(counter),
v=_escape_in_snippet(v),
q=quote_char)
else:
if for_json:
dumps = json.dumps(v)
else: # python
dumps = repr(v)
v = '${{{i}:{v}}}'.format(i=next(counter), v=_escape_in_snippet(dumps))
else:
v = '${i}'.format(i=next(counter))
return '{q}{k}{q}: {v}'.format(k=k, v=v, q=quote_char)
keys = iter(command_args)
if not isinstance(command_args, OrderedDict):
keys = sorted(keys)
snippet_items = (make_snippet_item(k, command_args[k]) for k in keys)
if for_json:
args_content = ",\n\t".join(snippet_items)
args_snippet = '"args": {{\n\t{0}\n}},$0'.format(args_content)
else:
args_content = ", ".join(snippet_items)
args_snippet = '{{{0}}}'.format(args_content)
return args_snippet
def _builtin_completions(names):
_, data = get_builtin_command_meta_data()
for name in names:
yield sublime.CompletionItem(
trigger=name,
annotation="built-in",
completion=name,
kind=KIND_MAP.get(data[name].get("command_type"), KIND_COMMAND),
details=data[name].get('doc_string') or "",
# TODO link to show full description
)
def _plugin_completions(cmd_classes):
for cmd_class in cmd_classes:
name = get_command_name(cmd_class)
module = cmd_class.__module__
package_name = module.split(".")[0]
if issubclass(cmd_class, sublime_plugin.TextCommand):
kind = KIND_TEXT
elif issubclass(cmd_class, sublime_plugin.WindowCommand):
kind = KIND_WINDOW
elif issubclass(cmd_class, sublime_plugin.ApplicationCommand):
kind = KIND_APPLICATION
else:
kind = KIND_COMMAND
yield sublime.CompletionItem(
trigger=name,
annotation=package_name,
completion=name,
kind=kind,
details=(cmd_class.__doc__ or "").strip(),
# TODO link to show full description
)
def _create_completions(command_type=""):
completions = []
completions.extend(_builtin_completions(get_builtin_commands(command_type)))
completions.extend(_plugin_completions(iter_python_command_classes(command_type)))
logger.debug("Collected %d command completions", len(completions))
return completions
class SublimeTextCommandCompletionListener(sublime_plugin.EventListener):
@inhibit_word_completions
def on_query_completions(self, view, prefix, locations):
keymap_scope = "source.json.sublime meta.command-name"
loc = locations[0]
if not view.score_selector(loc, keymap_scope):
return
return _create_completions()
class SublimeTextCommandCompletionPythonListener(sublime_plugin.EventListener):
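    # Matches text like `view.run_command("` or `win.run_command('foo` so the
    # receiver variable (and thus the command type) can be guessed.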
_RE_LINE_BEFORE = re.compile(
r"(?P<callervar>\w+)\s*\.\s*run_command\s*\("
r"\s*['\"]\w*$",
re.MULTILINE
)
@inhibit_word_completions
def on_query_completions(self, view, prefix, locations):
loc = locations[0]
python_arg_scope = ("source.python meta.function-call.arguments.python string.quoted")
if not view.score_selector(loc, python_arg_scope) or not is_plugin(view):
return None
        # Look at the current line plus the one above it, up to the cursor.
        lines = view.line(sublime.Region(view.line(locations[0]).a - 1, loc))
        before_region = sublime.Region(lines.a, loc)
before = view.substr(before_region)
m = self._RE_LINE_BEFORE.search(before)
if not m:
return None
# get the command type
caller_var = m.group('callervar')
logger.debug("caller_var: %s", caller_var)
if "view" in caller_var or caller_var == "v":
command_type = 'text'
elif caller_var == "sublime":
command_type = 'app'
else:
# window.run_command allows all command types
command_type = ''
return _create_completions(command_type)
class SublimeTextCommandArgsCompletionListener(sublime_plugin.EventListener):
_default_args = [("args\targuments", '"args": {\n\t"$1": "$2"$0\n},')]
_st_insert_arg_scope = (
"("
" ("
+ ", ".join("source.json.sublime.{}".format(suffix)
for suffix in ("commands", "keymap", "macro", "menu", "mousemap"))
+ ")"
" & "
" meta.sequence meta.mapping"
" - meta.sequence meta.mapping meta.mapping"
")"
"- string "
"- comment "
"- ("
" meta.value.json "
" | meta.mapping.json meta.mapping.json "
" | meta.sequence.json meta.sequence.json "
" - meta.menu.collection.sublime-menu"
")"
)
_RE_COMMAND_SEARCH = re.compile(r'\"command\"\s*\:\s*\"(\w+)\"')
def on_query_completions(self, view, prefix, locations):
if not view.score_selector(locations[0], self._st_insert_arg_scope):
return
# extract the line and the line above to search for the command
lines_reg = view.line(sublime.Region(view.line(locations[0]).a - 1, locations[0]))
lines = view.substr(lines_reg)
results = self._RE_COMMAND_SEARCH.findall(lines)
if not results:
return self._default_args
command_name = results[-1]
logger.debug("building args completions for command %r", command_name)
command_args = get_args_from_command_name(command_name)
if not command_args:
return self._default_args
completion = create_args_snippet_from_command_args(command_args, for_json=True)
return [sublime.CompletionItem(
trigger="args",
annotation="auto-detected",
completion=completion,
completion_format=sublime.COMPLETION_FORMAT_SNIPPET,
kind=KIND_SNIPPET,
)]
class SublimeTextCommandArgsCompletionPythonListener(sublime_plugin.EventListener):
_default_args_dict = {
c: sublime.CompletionItem(
trigger="args",
completion="{{{q}$1{q}: $0}}".format(q=c),
completion_format=sublime.COMPLETION_FORMAT_SNIPPET,
kind=KIND_SNIPPET,
)
for c in "'\""
}
_RE_LINE_BEFORE = re.compile(
r"\w+\s*\.\s*run_command\s*\("
r"\s*(['\"])(\w+)\1,\s*\w*$"
)
def on_query_completions(self, view, prefix, locations):
loc = locations[0]
python_arg_scope = "source.python meta.function-call.arguments.python,"
if not view.score_selector(loc, python_arg_scope) or not is_plugin(view):
return
before_region = sublime.Region(view.line(loc).a, loc)
before = view.substr(before_region)
m = self._RE_LINE_BEFORE.search(before)
if not m:
return
quote_char, command_name = m.groups()
logger.debug("building args completions for command %r", command_name)
command_args = get_args_from_command_name(command_name)
if command_args is None:
return self._default_args_dict[quote_char]
completion = create_args_snippet_from_command_args(command_args, quote_char,
for_json=False)
return [sublime.CompletionItem(
trigger="args",
annotation="auto-detected",
completion=completion,
completion_format=sublime.COMPLETION_FORMAT_SNIPPET,
kind=KIND_SNIPPET,
)]
| mit | -3,680,533,947,201,142,300 | 33.657343 | 94 | 0.595036 | false |
xymus/pycaptain | src/common/comms.py | 1 | 19189 | from orders import *
from gfxs import *
import ids
local_version = "v0.6.0" # major version
revision = "$Revision: 107 $" # updated by subversion
revisionSplitted = revision.split()
if len(revisionSplitted) > 2:
local_version = "%sr%s" % ( local_version, revisionSplitted[1] )
else:
local_version = "%srNA" % ( local_version )
version = "%s_%s" %( local_version, ids.local_version )
if __debug__:
print "PyCaptain %s" % version
class AttackOrder:
def __init__(self):
        self.target = None
        self.weapon = None
# inputs -> server
class COInput:
def __init__(self,xc=0,yc=0,wc=320,hc=320):
self.xc = xc
self.yc = yc
self.wc = wc
self.hc = hc
self.orders = []
self.mouseDownAt = (0,0)
self.mouseUpAt = (0,0)
self.mouseDownAtV = (0,0)
self.mouseUpAtV = (0,0)
self.mouseUpped = False
self.mouseRightUpped = False
self.mousePos = (0,0)
self.right = False
self.left = False
self.up = False
self.down = False
def dump(self):
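        # Wire format: "xc;yc;wc;hc" followed by one ';'-separated entry per
        # order, each entry being ':'-separated fields led by an order id.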
dump = "%i;%i;%i;%i" % ( self.xc, self.yc, self.wc, self.hc )
for order in self.orders:
if isinstance( order, OrderMove ):
dump = dump + ";%i:%i:%i:%7f" % ( ids.O_MOVE, order.x, order.y, order.ori )
if isinstance( order, OrderStopMove ):
dump = dump + ";%i:%.2f" % ( ids.O_STOP_MOVE, order.ori )
elif isinstance( order, OrderRecallShips ):
dump = dump + ";%i:%i" % ( ids.O_RECALL_SHIPS, order.type )
elif isinstance( order, OrderLaunchShips ):
dump = dump + ";%i:%i" % ( ids.O_LAUNCH_SHIPS, order.type )
elif isinstance( order, OrderJumpNow ):
dump = dump + ";%i" % ( ids.O_JUMP_NOW )
elif isinstance( order, OrderJump ):
dump = dump + ";%i:%i:%i" % ( ids.O_JUMP, order.x, order.y )
elif isinstance( order, OrderLaunchMissile ):
dump = dump + ";%i:%i:%i:%i" % ( ids.O_LAUNCH_MISSILE, order.type, order.x, order.y )
elif isinstance( order, OrderAttack ):
dump = dump + ";%i:%i" % ( ids.O_ATTACK, order.obj )
elif isinstance( order, OrderOrbit ):
dump = dump + ";%i:%i" % ( ids.O_ORBIT, order.obj )
elif isinstance( order, OrderBuildTurret ):
dump = dump + ";%i:%i:%i" % ( ids.O_BUILD_TURRET, order.tp, order.type )
elif isinstance( order, OrderBuildShip ):
dump = dump + ";%i:%i:%i" % ( ids.O_BUILD_SHIP, order.type, order.rate )
elif isinstance( order, OrderBuildMissile ):
dump = dump + ";%i:%i:%i" % ( ids.O_BUILD_MISSILE, order.type, order.rate )
elif isinstance( order, OrderActivateTurret ):
dump = dump + ";%i:%i:%i" % ( ids.O_TURRET_ACTIVATE, order.turret, order.activate )
elif isinstance( order, OrderActivateShield ):
dump = dump + ";%i:%i" % ( ids.O_CHARGE, order.activate )
elif isinstance( order, OrderActivateRepair ):
dump = dump + ";%i:%i" % ( ids.O_REPAIR, order.activate )
elif isinstance( order, OrderSetRelation ):
dump = dump + ";%i:%s:%i" % ( ids.O_RELATION, order.other, order.level )
elif isinstance( order, OrderSelfDestruct ):
dump = dump + ";%i" % ( ids.O_SELF_DESTRUCT )
elif isinstance( order, OrderBroadcast ):
dump = dump + ";%i:%s" % ( ids.O_BROADCAST, order.text )
elif isinstance( order, OrderDirectedCast ):
dump = dump + ";%i:%s:%i:%i" % ( ids.O_DIRECTED_CAST, order.text, order.x, order.y )
return dump
def CopyCOInput( input ):
return COInput( input.xc, input.yc, input.wc, input.hc )
def LoadCOInput( text ):
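    # Inverse of COInput.dump(): rebuild the viewport and order list from the
    # ';'/':'-separated wire format.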
es = text.split(";")
inputs = COInput( int(es[0]), int(es[1]), int(es[2]), int(es[3]) )
if len(es[4:]) > 0:
for e in es[4:]: #range(int(es[4])):
os = e.split(":")
if int(os[0]) == ids.O_MOVE:
order = OrderMove( int(os[1]), int(os[2]), float(os[3]) )
elif int(os[0]) == ids.O_STOP_MOVE:
order = OrderStopMove( float(os[1]) )
elif int(os[0]) == ids.O_RECALL_SHIPS:
order = OrderRecallShips( int(os[1]) )
elif int(os[0]) == ids.O_LAUNCH_SHIPS:
order = OrderLaunchShips( int(os[1]) )
elif int(os[0]) == ids.O_JUMP_NOW:
order = OrderJumpNow()
elif int(os[0]) == ids.O_JUMP:
order = OrderJump( (int(os[1]), int(os[2])) )
elif int(os[0]) == ids.O_LAUNCH_MISSILE:
order = OrderLaunchMissile( int(os[1]), (int(os[2]), int(os[3])) )
elif int(os[0]) == ids.O_ATTACK:
order = OrderAttack( int(os[1]) )
elif int(os[0]) == ids.O_ORBIT:
order = OrderOrbit( int(os[1]) )
elif int(os[0]) == ids.O_BUILD_TURRET:
order = OrderBuildTurret( int(os[1]), int(os[2]) )
elif int(os[0]) == ids.O_BUILD_SHIP:
order = OrderBuildShip( int(os[1]), int(os[2]) )
elif int(os[0]) == ids.O_BUILD_MISSILE:
order = OrderBuildMissile( int(os[1]), int(os[2]) )
elif int(os[0]) == ids.O_TURRET_ACTIVATE:
order = OrderActivateTurret( int(os[1]), int(os[2]) )
elif int(os[0]) == ids.O_CHARGE:
order = OrderActivateShield( int(os[1]) )
elif int(os[0]) == ids.O_REPAIR:
order = OrderActivateRepair( int(os[1]) )
elif int(os[0]) == ids.O_RELATION:
order = OrderSetRelation( os[1], int(os[2]) )
elif int(os[0]) == ids.O_SELF_DESTRUCT:
order = OrderSelfDestruct()
elif int(os[0]) == ids.O_BROADCAST:
order = OrderBroadcast( os[1] )
elif int(os[0]) == ids.O_DIRECTED_CAST:
order = OrderDirectedCast( os[1], (int(os[2]), int(os[3])) )
# order = OrderMove( int(es[5+3*i]), int(es[6+3*i]), float(es[7+3*i]) )
inputs.orders.append( order )
return inputs
# objects -> client
class COObject:
def __init__(self,type,xp,yp,zp,ori,uid,selectRadius,relation=ids.U_NEUTRAL):
self.type = type
self.xp = xp # int(xp+0.5)
self.yp = yp # int(yp+0.5)
self.zp = zp
self.ori = ori
self.uid = uid
self.selectRadius = selectRadius
self.relation = relation
def dump(self):
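        # Wire format: type;xp;yp;zp;ori;uid;selectRadius;relation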
dump = "%i;%i;%i;%i;%.2f;%i;%i;%i" % ( self.type, self.xp, self.yp, self.zp, self.ori, self.uid, self.selectRadius, self.relation )
return dump
def LoadCOObject( text ):
es = text.split(";")
return COObject( int(es[0]), int(es[1]), int(es[2]), int(es[3]), float(es[4]), int(es[5]), int(es[6]), int(es[7]) )
class COObjects:
def __init__(self,coobjects):
self.coobjects = coobjects
def dump(self):
dumps = [ coobject.dump() for coobject in self.coobjects ]
dump = ":".join(dumps)
return dump
def LoadCOObjects( text ):
bs = text.split(":")
coobjects = []
coobject = None
for b in bs:
try:
coobject = LoadCOObject( b )
coobjects.append( coobject )
except Exception, ex:
print "failed LoadCOOBject:", ex
return COObjects( coobjects )
# stats -> client
class COPlayerStatus:
def __init__(self, gameTick, dead, ore, maxOre,
energy, maxEnergy, shieldIntegrity, hullIntegrity,
canJump, repairing, charging, hangarSpace,
shipsSpace, missilesSpace, jumpCharge, jumpRecover,
oreInProcess, turrets, missiles, ships,
radars, ennemyInRadar=False, dangerInRadar=False ): # , buildableTurrets
self.gameTick = gameTick
self.dead = dead
self.ore = ore
self.maxOre = maxOre
self.energy = energy
self.maxEnergy = maxEnergy
self.shieldIntegrity = shieldIntegrity
self.hullIntegrity = hullIntegrity
self.hangarSpace = hangarSpace
self.shipsSpace = shipsSpace
self.missilesSpace = missilesSpace
self.oreInProcess = oreInProcess
self.canJump = canJump
self.jumpCharge = jumpCharge
self.jumpRecover = jumpRecover
self.turrets = turrets
self.missiles = missiles
self.ships = ships
self.radars = radars
self.repairing = repairing
self.charging = charging
self.ennemyInRadar = ennemyInRadar
self.dangerInRadar = dangerInRadar
def dump(self):
if self.dead:
dump = "%i" % ( self.gameTick )
else:
dump = "%i;%i;%i;%i;%i;%.2f;%.2f;%i;%i;%i;%i;%i;%i;%i;%i;%i;%i" % ( self.gameTick, self.ore, self.maxOre, self.energy, self.maxEnergy, self.shieldIntegrity, self.hullIntegrity, self.canJump, self.repairing, self.charging, self.hangarSpace, self.shipsSpace, self.missilesSpace, self.jumpCharge, self.jumpRecover, self.ennemyInRadar, self.dangerInRadar )
dump = dump + ";"
for oip in self.oreInProcess:
dump = dump + "%i:"% oip
dump = dump + ";"
for turret in self.turrets:
dump = dump + "%i:%i:%i:%.2f:%.2f:%i:%i:%i:%i:%i:%i:%i:%i:" % ( turret.type, turret.xp, turret.yp, turret.minAngle, turret.maxAngle, turret.buildPerc,turret.range,turret.on,turret.activable,turret.useEnergy,turret.useOre,turret.energyRebate, turret.oreRebate )
for bt in turret.buildables:
dump = dump + "%i_%i/" % ( bt.type, bt.canBuild ) # , bt.energyCost, bt.oreCost, bt.category )
dump = dump + "|"
dump = dump + ";"
for ship in self.missiles:
dump = dump + "%i:%i:%i:%i:%i:%i:%i|" % ( ship.type, ship.usable, ship.canLaunch, ship.nbr, ship.buildPerc, ship.canBuild, ship.show )
dump = dump + ";"
for ship in self.ships:
dump = dump + "%i:%i:%i:%i:%i:%i:%i|" % ( ship.type, ship.usable, ship.canLaunch, ship.nbr, ship.buildPerc, ship.canBuild, ship.show )
dump = dump + ";"
for radar in self.radars:
dump = dump + "%i:%i:%i|" % ( radar.xr, radar.yr, radar.range )
return dump
def LoadCOPlayerStatus( text ):
es = text.split(";")
oreInProcess = []
turrets = []
missiles = []
ships = []
buildableTurrets = []
radars = []
if len(es)==2: # dead
for o in es[ 1 ].split("|"):
if len( o ) > 0:
i = o.split(":")
radars.append( CORadar( (int(i[0]), int(i[1])), int(i[2]) ) )
stats = COPlayerStatus( int(es[0]), True, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0,0, 0, oreInProcess, turrets, missiles, ships, radars )
else:
for o in es[ 17 ].split(":"):
if len( o ) > 0:
oreInProcess.append( int(o) )
for o in es[ 18 ].split("|"):
if len( o ) > 0:
i = o.split(":")
buildables = []
for b in i[ 13 ].split("/"):
if len( b ) > 0:
bi = b.split("_")
buildables.append( COBuildable( int(bi[0]), int(bi[1]) ) )
turrets.append( COTurret( int(i[0]), int(i[1]), int(i[2]), float(i[3]), float(i[4]), int(i[5]), int(i[6]), int(i[7]), int(i[8]), int(i[9]), int(i[10]), int(i[11]), int(i[12]), buildables ) )
for o in es[ 19 ].split("|"):
if len( o ) > 0:
i = o.split(":")
missiles.append( COShips( int(i[0]), int(i[1]), int(i[2]), int(i[3]), int(i[4]), int(i[5]), int(i[6]) ) )
for o in es[ 20 ].split("|"):
if len( o ) > 0:
i = o.split(":")
ships.append( COShips( int(i[0]), int(i[1]), int(i[2]), int(i[3]), int(i[4]), int(i[5]), int(i[6]) ) )
for o in es[ 21 ].split("|"):
if len( o ) > 0:
i = o.split(":")
radars.append( CORadar( (int(i[0]), int(i[1])), int(i[2]) ) )
stats = COPlayerStatus( int(es[0]), False, int(es[1]), int(es[2]), int(es[3]), int(es[4]), float(es[5]), float(es[6]), int(es[7]), int(es[8]), int(es[9]), int(es[10]), int(es[11]), int(es[12]), int(es[13]), int(es[14]), oreInProcess, turrets, missiles, ships, radars, int(es[15]), int(es[16]) )
return stats
class COPossible:
def __init__( self, ship, race, nbrTurrets, speed, shield, hull, hangar, canJump, civilians ):
self.ship = ship
self.race = race
self.nbrTurrets = nbrTurrets
self.speed = speed
self.shield = shield
self.hull = hull
self.hangar = hangar
self.canJump = canJump
self.civilians = civilians
class COPossibles:
def __init__( self, ships ):
self.ships = ships
def dump(self):
strings = [ "%i;%i;%i;%i;%i;%i;%i;%i;%i"%(ship.ship, ship.race, ship.nbrTurrets, ship.speed, ship.shield, ship.hull, ship.hangar, ship.canJump, ship.civilians) for ship in self.ships ]
return ":".join( strings )
def LoadCOPossibles( text ):
ss = text.split(":")
ships = []
for s in ss:
if len( s ) > 0:
es = s.split(";")
ships.append( COPossible( int(es[0]), int(es[1]), int(es[2]), int(es[3]), int(es[4]), int(es[5]), int(es[6]), int(es[7]), int(es[8]) ) )
return COPossibles( ships )
class COPlayer:
def __init__( self, name, race, relIn, relOut, isHuman ):
self.name = name
self.race = race
self.relIn = relIn
self.relOut = relOut
self.isHuman = isHuman
class COPlayers:
def __init__( self, players ):
self.players = players
def dump(self):
strings = [ "%s;%i;%i;%i;%i"%(player.name,player.race,player.relIn,player.relOut,player.isHuman) for player in self.players ]
return ":".join( strings )
def LoadCOPlayers( text ):
ss = text.split(":")
players = []
for s in ss:
if len( s ) > 0:
# print s
es = s.split(";")
players.append( COPlayer( es[0], int(es[1]), int(es[2]), int(es[3]), int(es[4]) ) )
# print "loaded", players
return COPlayers( players ) # COPlayers( [ es = s.split(";"); COPlayer( es[0], int(es[1]), int(es[2]), int(es[3]), int(es[4]) ) for s in ss ] )
class COTurret:
def __init__( self, type, xp, yp, minAngle, maxAngle, buildPerc, range, on, activable, useEnergy, useOre, energyRebate, oreRebate, buildables ):
self.type = type
self.xp = xp
self.yp = yp
self.minAngle = minAngle
self.maxAngle = maxAngle
self.buildPerc = buildPerc
self.range = range
self.on = on
self.activable = activable
self.useEnergy = useEnergy
self.useOre = useOre
self.buildables = buildables
self.energyRebate = energyRebate
self.oreRebate = oreRebate
class COShips:
def __init__( self, type, usable, canLaunch, nbr, buildPerc, canBuild, show ):
self.type = type
self.usable = usable
self.canLaunch = canLaunch
self.nbr = nbr
self.buildPerc = buildPerc
self.canBuild = canBuild
self.show = show
class COBuildable:
def __init__( self, type, canBuild ): # , energyCost, oreCost, category
self.type = type
self.canBuild = canBuild
# self.energyCost = energyCost
# self.oreCost = oreCost
# self.category = category
class COAstre:
def __init__(self, type, xp, yp, radius=0 ):
self.type = type
self.xp = xp
self.yp = yp
self.radius = radius
def dump(self):
dump = "%i;%i;%i;%i;" % ( self.type, self.xp, self.yp, self.radius )
return dump
class CORadar:
def __init__( self, (xr,yr), range ):
self.xr = xr
self.yr = yr
self.range = range
def LoadCOAstre( text ):
es = text.split(";")
    # COAstre.dump() ends with a trailing ';', so es[4] is empty and is ignored
return COAstre( int(es[0]), int(es[1]), int(es[2]), int(es[3]) )
class COGfxs:
def __init__(self, gfxs ):
self.gfxs = gfxs
def dump(self):
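        # Wire format: leading gfx count, then one ';'-separated entry per gfx,
        # each ':'-separated and led by its gfx type id.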
dump = "%i" % ( len(self.gfxs) )
for gfx in self.gfxs:
if isinstance( gfx, GfxLaser ):
dump = dump + ";%i:%i:%i:%i:%i:%i:%i:%i" % (ids.G_LASER_SMALL, gfx.xp,gfx.yp,gfx.z,gfx.xd,gfx.yd, gfx.width, gfx.color)
elif isinstance( gfx, GfxExplosion ):
dump = dump + ";%i:%i:%i:%i:%i:%i" % (ids.G_EXPLOSION, gfx.xp,gfx.yp,gfx.radius,gfx.sound,gfx.delai)
elif isinstance( gfx, GfxShield ):
dump = dump + ";%i:%i:%i:%i:%i:%.3f:%.3f" % (ids.G_SHIELD, gfx.xp,gfx.yp,gfx.radius,gfx.strength,gfx.angle,gfx.hit)
elif isinstance( gfx, GfxExhaust ): # careful, GfxExhaust inherits of GfxFragment
pass # TODO, removed because not used on client side
elif isinstance( gfx, GfxFragment ):
dump = dump + ";%i:%i:%i:%i:%.2f:%.2f:%.2f:%.2f:%i:%i" % (ids.G_FRAGMENT, gfx.xp,gfx.yp,gfx.zp,gfx.ori,gfx.xi,gfx.yi,gfx.ri,gfx.type,gfx.ttl)
elif isinstance( gfx, GfxLightning ):
dump = dump + ";%i:%i:%i:%i:%i:%i:%i" % (ids.G_LIGHTNING, gfx.xp,gfx.yp,gfx.z,gfx.xd,gfx.yd, gfx.strength )
elif isinstance( gfx, GfxJump ):
dump = dump + ";%i:%i:%i:%i:%i:%i" % (ids.G_JUMP, gfx.xp,gfx.yp,gfx.radius,gfx.angle*100,gfx.delai)
return dump
def LoadCOGfxs( text ):
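    # Rebuild gfx objects from the format produced by COGfxs.dump().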
gfxs = []
es = text.split(";")
n = int(es[0])
for e in es[1:]:
ss = e.split(":")
if int(ss[ 0 ]) == ids.G_LASER_SMALL:
gfx = GfxLaser( (int(ss[1]),int(ss[2])), int(ss[3]), (int(ss[4]),int(ss[5])), int(ss[6]), int(ss[7]) )
elif int(ss[ 0 ]) == ids.G_EXPLOSION:
gfx = GfxExplosion( (int(ss[1]),int(ss[2])), int(ss[3]), int(ss[4]), int(ss[5]) )
elif int(ss[ 0 ]) == ids.G_SHIELD:
gfx = GfxShield( (int(ss[1]),int(ss[2])), int(ss[3]), int(ss[4]), float(ss[5]), float(ss[6]) )
elif int(ss[ 0 ]) == ids.G_FRAGMENT:
gfx = GfxFragment( (int(ss[1]),int(ss[2])), int(ss[3]), float(ss[4]), float(ss[5]), float(ss[6]), float(ss[7]), int(ss[8]), int(ss[9]) )
elif int(ss[ 0 ]) == ids.G_EXHAUST:
gfx = GfxExhaust( (int(ss[1]),int(ss[2])), int(ss[3]), float(ss[4]), float(ss[5]), float(ss[6]), float(ss[7]) )
elif int(ss[ 0 ]) == ids.G_LIGHTNING:
gfx = GfxLightning( (int(ss[1]),int(ss[2])), int(ss[3]), (int(ss[4]),int(ss[5])), int(ss[6]) )
        elif int(ss[ 0 ]) == ids.G_JUMP:
            # assumes GfxJump((xp, yp), radius, angle, delai), mirroring its dump format
            gfx = GfxJump( (int(ss[1]),int(ss[2])), int(ss[3]), float(ss[4])/100, int(ss[5]) )
else: print int(ss[ 0 ])
gfxs.append( gfx )
return gfxs
class COScore:
def __init__( self, playerName, score, deaths ):
pass
class COEnd:
def __init__( self, scores, ):
pass
class COBegin:
pass
class COTutorialMsg:
pass
| gpl-2.0 | -5,088,383,613,936,897,000 | 38.397895 | 360 | 0.530043 | false |
v-legoff/pa-poc3 | src/command/start/start.py | 1 | 2023 | # Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing the start default command."""
from command import Command
class Start(Command):
"""Command 'start'.
This command should be used as a container to start sessions, processes
or different actions.
"""
name = "start"
brief = "start a process"
description = \
"This command is used to start a session, a process or a suite " \
"of actions, like the server itself or even the Python Aboard daemon."
| bsd-3-clause | 2,614,261,551,016,107,000 | 42.978261 | 79 | 0.750865 | false |
ebasso/rest-client-examples | ibm-connections/ListBlogPosts.py | 1 | 4246 | # -*- coding: utf-8 -*-
#
# Antes de executar:
#
# > pip install requests
#
#
import sys
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.auth import HTTPBasicAuth
import xml.etree.ElementTree as ET
CONNECTIONS_HOST = 'https://connections.<company>.com'
CONNECTIONS_USERNAME = '<REPLACE_HERE>'
CONNECTIONS_PASSWORD = '<REPLACE_HERE>'
# Disable warnings from untrusted TLS certificates
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def doGet(url,headers,auth):
res = requests.get(url=url,headers=headers,auth=auth, verify=False)
if (res.status_code != 200):
print 'requests.get -> %s = %s\n' % (res.url, res)
        return None
#print res.content
return res.content
def parsePostDetails(content):
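    # Parse the Atom entry returned by the Connections Blogs API and extract
    # the post title plus the author's name and email.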
entry = ET.fromstring(content)
#entry = root.find('{http://www.w3.org/2005/Atom}entry')
#print '------------------------------------------------------------------------------------------------------------------------------------------------------'
#for child in entry:
# print child.tag, child.attrib
#print '------------------------------------------------------------------------------------------------------------------------------------------------------'
title = entry.find('{http://www.w3.org/2005/Atom}title').text
author = entry.find('{http://www.w3.org/2005/Atom}author')
name = author.find('{http://www.w3.org/2005/Atom}name').text
email = author.find('{http://www.w3.org/2005/Atom}email').text
blogPost = {
'title': title.encode("cp850"),
'name': name,
'email': email
}
#print profile
return blogPost
def getPostDetails(handle=None,entryId=None):
if (handle == None or entryId == None):
return None
url = '%s/blogs/%s/api/entries/%s' % (CONNECTIONS_HOST,handle,entryId)
headers = { 'Content-Type': 'application/atom+xml;charset=UTF-8'}
auth=HTTPBasicAuth(CONNECTIONS_USERNAME, CONNECTIONS_PASSWORD)
feed = doGet(url=url,headers=headers,auth=auth)
if (feed is None):
return None
blogPost = parsePostDetails(feed)
if (blogPost is None):
print 'Cannot get Blog Post Information.'
return None
print 'Post Details:\n'
print blogPost['title']
print ' |--> name: ' + blogPost['name']
print ' |--> email: ' + blogPost['email']
def parseBlogPosts(content):
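    # Walk every <entry> element in the Atom feed and collect its id, title
    # and author details.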
posts = []
root = ET.fromstring(content)
entries = root.findall("{http://www.w3.org/2005/Atom}entry")
for entry in entries:
entryId = entry.find('{http://www.w3.org/2005/Atom}id').text
title = entry.find('{http://www.w3.org/2005/Atom}title').text
author = entry.find('{http://www.w3.org/2005/Atom}author')
name = author.find('{http://www.w3.org/2005/Atom}name').text
email = author.find('{http://www.w3.org/2005/Atom}email').text
#urn:lsid:ibm.com:blogs:entry-048667f2-400b-4b70-8c04-cc163403cba6
entryId = entryId[-36:]
post = {
'entryId': entryId,
'title': title.encode('utf-8'),
'name': name,
'email': email
}
posts.append(post)
return posts
def getBlogPosts(handle=None):
if (handle == None):
return None
url = '%s/blogs/%s/api/entries' % (CONNECTIONS_HOST,handle)
headers = { 'Content-Type': 'application/atom+xml;charset=UTF-8'}
auth=HTTPBasicAuth(CONNECTIONS_USERNAME, CONNECTIONS_PASSWORD)
feed = doGet(url=url,headers=headers,auth=auth)
if (feed is None):
return None
posts = parseBlogPosts(feed)
if (posts is None):
return None
return posts
#################### Main Module ###################
print 'Connecting to IBM Connections...\n'
handle = 'ce8716a1-3575-44fd-8b2e-4f5360fe03e1'
#entryId = '66ce5af8-d7e2-451c-9435-3f236accfc12'
#getPostDetails(handle,entryId)
posts = getBlogPosts(handle)
if (posts is None):
print 'Cannot get Blog Posts Information.'
sys.exit(1)
print 'Blog Posts:\n'
for post in posts:
print post['entryId']
print ' |--> name: ' + post['title']
print ' |--> name: ' + post['name']
print ' |--> email: ' + post['email']
print
| mit | -1,592,454,718,963,992,800 | 31.661538 | 163 | 0.595384 | false |
andrewnsk/simple-weather | sweather/collect_weather.py | 1 | 1945 | import json
import pyowm
from . import azimuth
# openweathermap API key
# please use your own API key!
API_KEY = '3ede2418f1124401efcd68e5ae3bddcb'
town = 'Norilsk'
area = 'ru'
owm = pyowm.OWM(API_KEY)
observation = owm.weather_at_place('{0},{1}'.format(town, area))
w = observation.get_weather()
# print(w)
class GetWeather:
def __init__(self, location, country, owm_api_key):
self.location = location
self.country = country
self.owm_api_key = owm_api_key
self.owm = pyowm.OWM(owm_api_key)
        self.observation = self.owm.weather_at_place('{0},{1}'.format(self.location, self.country))
self.w = self.observation.get_weather()
def wind_direction(self):
        return str(azimuth.degree(round(json.loads(json.dumps(self.w.get_wind()))['deg'])))
def wind_speed(self):
return str(round(json.loads(json.dumps(self.w.get_wind()))['speed']))
def temperature(self):
return str(round(json.loads(json.dumps(self.w.get_temperature('celsius')))['temp']))
def humidity(self):
return int(round(json.loads(json.dumps(self.w.get_humidity()))))
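# Example usage of GetWeather (sketch, assumes a valid OWM API key):
#   gw = GetWeather('Norilsk', 'ru', API_KEY)
#   print(gw.temperature() + ' C, wind ' + gw.wind_speed() + ' m/s')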
class HumanWeather:
def __init__(self):
pass
def get_weather_wind_direction(mode=True):
    return str(azimuth.degree(round(json.loads(json.dumps(w.get_wind()))['deg']), mode))
def get_weather_wind_speed():
return str(round(json.loads(json.dumps(w.get_wind()))['speed']))
def get_weather_temperature():
return str(round(json.loads(json.dumps(w.get_temperature('celsius')))['temp']))
def get_weather_humidity():
return int(round(json.loads(json.dumps(w.get_humidity()))))
def weather_wind():
return "Ветер: " + get_weather_wind_direction() + " " + get_weather_wind_speed() + " м/с"
def weather_temp():
return "Температура: " + get_weather_temperature()
def weather_humidity():
return "Влажность: " + str(get_weather_humidity()) + " %"
| gpl-3.0 | 6,671,246,398,581,805,000 | 25.273973 | 96 | 0.65537 | false |
Mesitis/community | sample-code/Python/13 Portfolio/get_portfolio_allocations.py | 1 | 3181 | '''
- login and get token
- process 2FA if 2FA is set up for this account
- if the user is a regular customer then get the portfolio allocations for this user
- if the user is a partner_admin then get the portfolio allocations for the first user from the list of users this partner admin has access to
'''
import requests
import json
get_token_url = "https://api.canopy.cloud:443/api/v1/sessions/"
validate_otp_url = "https://api.canopy.cloud:443/api/v1/sessions/otp/validate.json" #calling the production server for OTP authentication
get_transactions_url = "https://api.canopy.cloud:443/api/v1/transactions.json"
get_partner_users_url = "https://api.canopy.cloud:443/api/v1/admin/users.json"
get_portfolio_allocations_url = "https://api.canopy.cloud:443/api/v1/portfolio/allocations.json"
#please replace below with your username and password over here
username = 'userxxx'
password = 'passxxx'
#please enter the OTP token in case it is enabled
otp_code = '123456'
#first call for a fresh token
payload = "user%5Busername%5D=" + username + "&user%5Bpassword%5D=" + password
headers = {
'accept': "application/json",
'content-type':"application/x-www-form-urlencoded"
}
response = requests.request("POST", get_token_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True)
token = response.json()['token']
login_flow = response.json()['login_flow']
#in case 2FA is enabled use the OTP code to get the second level of authentication
if login_flow == '2fa_verification':
headers['Authorization'] = token
payload = 'otp_code=' + otp_code
response = requests.request("POST", validate_otp_url, data=payload, headers=headers)
print json.dumps(response.json(), indent=4, sort_keys = True) #print response.text
token = response.json()['token']
login_role = response.json()['role']
switch_user_id = response.json()['id']
if login_role == 'Partneradmin':
#print "============== partner's users ==========="
headers = {
'authorization': token,
'content-type': "application/x-www-form-urlencoded; charset=UTF-8"
}
partner_users = []
response = requests.request("GET", get_partner_users_url, headers=headers)
for parent_user in response.json()['users']:
partner_users.append(parent_user['id'])
#print partner_users
#take the first users in the list as the switch_user_id
switch_user_id = partner_users[0]
# print json.dumps(response.json(), indent=4, sort_keys = True)
#in case the user is a partner_admin then switch_user_id is any one of the users it has access to (here we take the first one from the list)
#in case the user is a regular customer then the switch_user_id = user_id for this customer
#date and date type are variable
date = "04-04-2016"
date_type = "traded_on"
querystring = {"date":date,"date_type":date_type}
headers = {
'authorization': token,
'content-type': "application/x-www-form-urlencoded; charset=UTF-8",
'x-app-switch-user': str(switch_user_id)
}
response = requests.request("GET", get_portfolio_allocations_url, headers=headers, params=querystring)
print json.dumps(response.json(), indent=4, sort_keys = False)
| mit | -6,791,151,513,681,004,000 | 34.344444 | 140 | 0.711412 | false |
makielab/django-oscar | oscar/apps/dashboard/promotions/views.py | 1 | 11239 | import itertools
from django.views import generic
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.db.models import Count
from django.shortcuts import HttpResponse
from oscar.core.loading import get_classes
from oscar.apps.promotions.conf import PROMOTION_CLASSES
SingleProduct, RawHTML, Image, MultiImage, \
AutomaticProductList, PagePromotion, \
HandPickedProductList = get_classes('promotions.models',
['SingleProduct', 'RawHTML', 'Image', 'MultiImage', 'AutomaticProductList',
'PagePromotion', 'HandPickedProductList'])
SelectForm, RawHTMLForm, PagePromotionForm, HandPickedProductListForm, OrderedProductFormSet = get_classes('dashboard.promotions.forms',
['PromotionTypeSelectForm', 'RawHTMLForm', 'PagePromotionForm',
'HandPickedProductListForm', 'OrderedProductFormSet'])
class ListView(generic.TemplateView):
template_name = 'dashboard/promotions/promotion_list.html'
def get_context_data(self):
# Need to load all promotions of all types and chain them together
# no pagination required for now.
data = []
num_promotions = 0
for klass in PROMOTION_CLASSES:
objects = klass.objects.all()
num_promotions += objects.count()
data.append(objects)
promotions = itertools.chain(*data)
ctx = {
'num_promotions': num_promotions,
'promotions': promotions,
'select_form': SelectForm(),
}
return ctx
class CreateRedirectView(generic.RedirectView):
permanent = True
def get_redirect_url(self, **kwargs):
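        # Map the submitted promotion type code to its dashboard create URL.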
code = self.request.GET.get('promotion_type', None)
urls = {}
for klass in PROMOTION_CLASSES:
urls[klass.classname()] = reverse('dashboard:promotion-create-%s' %
klass.classname())
return urls.get(code, None)
class PageListView(generic.TemplateView):
template_name = 'dashboard/promotions/pagepromotion_list.html'
def get_context_data(self, *args, **kwargs):
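        # Group content blocks by page URL, with a usage count per page.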
pages = PagePromotion.objects.all().values(
'page_url').distinct().annotate(freq=Count('id'))
return {'pages': pages}
class PageDetailView(generic.TemplateView):
template_name = 'dashboard/promotions/page_detail.html'
def get_context_data(self, *args, **kwargs):
path = self.kwargs['path']
return {'page_url': path,
'positions': self.get_positions_context_data(path), }
def get_positions_context_data(self, path):
ctx = []
for code, name in settings.OSCAR_PROMOTION_POSITIONS:
promotions = PagePromotion._default_manager.select_related() \
.filter(page_url=path,
position=code) \
.order_by('display_order')
ctx.append({
'code': code,
'name': name,
'promotions': promotions,
})
return ctx
def post(self, request, **kwargs):
"""
When called with a post request, try and get 'promo[]' from
the post data and use it to reorder the page content blocks.
"""
data = dict(request.POST).get('promo[]')
self._save_page_order(data)
return HttpResponse(status=200)
def _save_page_order(self, data):
"""
Save the order of the pages. This gets used when an ajax request
        posts back a new order for promotions within page regions.
"""
for index, item in enumerate(data):
page = PagePromotion.objects.get(pk=item)
if page.display_order != index:
page.display_order = index
page.save()
class PromotionMixin(object):
def get_template_names(self):
return ['dashboard/promotions/%s_form.html' % self.model.classname(),
'dashboard/promotions/form.html']
class DeletePagePromotionView(generic.DeleteView):
template_name = 'dashboard/promotions/delete_pagepromotion.html'
model = PagePromotion
def get_success_url(self):
messages.info(self.request, _("Content block removed successfully"))
return reverse('dashboard:promotion-list-by-url',
kwargs={'path': self.object.page_url})
# ============
# CREATE VIEWS
# ============
class CreateView(PromotionMixin, generic.CreateView):
def get_success_url(self):
messages.success(self.request, _("Content block created successfully"))
return reverse('dashboard:promotion-update',
kwargs={'ptype': self.model.classname(),
'pk': self.object.id})
def get_context_data(self, *args, **kwargs):
ctx = super(CreateView, self).get_context_data(*args, **kwargs)
ctx['heading'] = self.get_heading()
return ctx
def get_heading(self):
if hasattr(self, 'heading'):
return getattr(self, 'heading')
return _('Create a new %s content block') % self.model._type
class CreateRawHTMLView(CreateView):
model = RawHTML
form_class = RawHTMLForm
class CreateSingleProductView(CreateView):
model = SingleProduct
class CreateImageView(CreateView):
model = Image
class CreateAutomaticProductListView(CreateView):
model = AutomaticProductList
class CreateHandPickedProductListView(CreateView):
model = HandPickedProductList
form_class = HandPickedProductListForm
def get_context_data(self, **kwargs):
ctx = super(CreateHandPickedProductListView,
self).get_context_data(**kwargs)
if 'product_formset' not in kwargs:
ctx['product_formset'] = OrderedProductFormSet(instance=self.object)
return ctx
def form_valid(self, form):
promotion = form.save(commit=False)
product_formset = OrderedProductFormSet(self.request.POST,
instance=promotion)
if product_formset.is_valid():
promotion.save()
product_formset.save()
self.object = promotion
messages.success(self.request, _('Product list content block created'))
return HttpResponseRedirect(self.get_success_url())
ctx = self.get_context_data(product_formset=product_formset)
        return self.render_to_response(ctx)
# ============
# UPDATE VIEWS
# ============
class UpdateView(PromotionMixin, generic.UpdateView):
actions = ('add_to_page', 'remove_from_page')
link_form_class = PagePromotionForm
def get_context_data(self, *args, **kwargs):
ctx = super(UpdateView, self).get_context_data(*args, **kwargs)
ctx['heading'] = _("Update content block")
ctx['promotion'] = self.get_object()
ctx['link_form'] = self.link_form_class()
content_type = ContentType.objects.get_for_model(self.model)
ctx['links'] = PagePromotion.objects.filter(content_type=content_type,
object_id=self.object.id)
return ctx
def post(self, request, *args, **kwargs):
action = request.POST.get('action', None)
if action in self.actions:
self.object = self.get_object()
return getattr(self, action)(self.object, request, *args, **kwargs)
return super(UpdateView, self).post(request, *args, **kwargs)
def get_success_url(self):
messages.info(self.request, _("Content block updated successfully"))
return reverse('dashboard:promotion-list')
def add_to_page(self, promotion, request, *args, **kwargs):
instance = PagePromotion(content_object=self.get_object())
form = self.link_form_class(request.POST, instance=instance)
if form.is_valid():
form.save()
page_url = form.cleaned_data['page_url']
messages.success(request, _("Content block '%(block)s' added to page '%(page)s'") % {
'block': promotion.name,
'page': page_url})
return HttpResponseRedirect(reverse('dashboard:promotion-update', kwargs=kwargs))
main_form = self.get_form_class()(instance=self.object)
ctx = self.get_context_data(form=main_form)
ctx['link_form'] = form
return self.render_to_response(ctx)
def remove_from_page(self, promotion, request, *args, **kwargs):
link_id = request.POST['pagepromotion_id']
try:
link = PagePromotion.objects.get(id=link_id)
except PagePromotion.DoesNotExist:
messages.error(request, _("No link found to delete"))
else:
page_url = link.page_url
link.delete()
messages.success(request, _("Content block removed from page '%s'") % page_url)
return HttpResponseRedirect(reverse('dashboard:promotion-update', kwargs=kwargs))
class UpdateRawHTMLView(UpdateView):
model = RawHTML
form_class = RawHTMLForm
class UpdateSingleProductView(UpdateView):
model = SingleProduct
class UpdateImageView(UpdateView):
model = Image
class UpdateAutomaticProductListView(UpdateView):
model = AutomaticProductList
class UpdateHandPickedProductListView(UpdateView):
model = HandPickedProductList
form_class = HandPickedProductListForm
def get_context_data(self, **kwargs):
ctx = super(UpdateHandPickedProductListView,
self).get_context_data(**kwargs)
if 'product_formset' not in kwargs:
ctx['product_formset'] = OrderedProductFormSet(instance=self.object)
return ctx
def form_valid(self, form):
promotion = form.save(commit=False)
product_formset = OrderedProductFormSet(self.request.POST,
instance=promotion)
if product_formset.is_valid():
promotion.save()
product_formset.save()
self.object = promotion
messages.success(self.request, _('Product list promotion updated'))
return HttpResponseRedirect(self.get_success_url())
ctx = self.get_context_data(product_formset=product_formset)
return self.render_to_response(ctx)
# ============
# DELETE VIEWS
# ============
class DeleteView(generic.DeleteView):
template_name = 'dashboard/promotions/delete.html'
def get_success_url(self):
messages.info(self.request, _("Content block deleted successfully"))
return reverse('dashboard:promotion-list')
class DeleteRawHTMLView(DeleteView):
model = RawHTML
class DeleteSingleProductView(DeleteView):
model = SingleProduct
class DeleteImageView(DeleteView):
model = Image
class DeleteAutomaticProductListView(DeleteView):
model = AutomaticProductList
class DeleteHandPickedProductListView(DeleteView):
model = HandPickedProductList
| bsd-3-clause | -8,248,384,469,763,867,000 | 33.47546 | 136 | 0.631551 | false |
zeth/scripts | files.py | 1 | 2192 | #!/usr/bin/env python
"""Really lazy webpage - list of files in a directory."""
#####################################################################
# Configuration variables
#
# Path to where files are stored.
FILEPATH = './'
#
# Path to where files are publically available.
URLHIERARCHY = './'
#
# Page Title
TITLE = 'Conference Papers: By Author'
#
# Path to Optional Template files.
# If used they will replace the below template.
# Don't forget to add quote marks.
#
HEADER = None
FOOTER = None
#
#
# Default Webpage template
#
WEBPAGE = """Content-Type: text/html\n
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8" />
<title>%(title)s</title>
</head>
<body>
<h1>%(title)s</h1>
<ul>%(content)s</ul>
</body>
</html>"""
#
#####################################################################
def createlinks(fpath, upath):
"""Make a hyperlink for every file in the directory"""
import os
files = os.listdir(fpath)
links = []
for i in files:
links.append( '<li><a href="' + \
upath + i + '">' + i.split('.')[0].replace('_', ' ') + '</a></li>')
stringy = "\n".join(links)
return stringy
def loadtemplates():
"""Loads templates and constructs an HTML page."""
try:
fhead = file(HEADER)
ffoot = file(FOOTER)
webpag = """Content-Type: text/html\n
"""
webpag += fhead.read()
webpag += '<h1>%(title)s</h1>'
webpag += '<ul>%(content)s</ul>'
webpag += ffoot.read()
fhead.close()
ffoot.close()
return webpag
except IOError:
return None
def main():
"""This runs when the file is called directly or via cgi."""
webpage = WEBPAGE
if HEADER and FOOTER:
template = loadtemplates()
if template:
webpage = template
print webpage % \
{'title':TITLE, 'content': createlinks(FILEPATH, URLHIERARCHY)}
# start the ball rolling
if __name__ == "__main__":
main()
| apache-2.0 | -955,491,426,895,079,700 | 25.409639 | 75 | 0.553832 | false |
jteehan/cfme_tests | cfme/tests/infrastructure/test_providers.py | 1 | 15573 | # -*- coding: utf-8 -*-
import uuid
import fauxfactory
import pytest
from utils import error
from cfme.base.credential import Credential
from cfme.common.provider_views import (InfraProviderAddView,
InfraProvidersView,
InfraProvidersDiscoverView)
from cfme.infrastructure.provider import discover, wait_for_a_provider, InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider, RHEVMEndpoint
from cfme.infrastructure.provider.virtualcenter import VMwareProvider, VirtualCenterEndpoint
from utils import testgen, version
from utils.update import update
from utils.blockers import BZ
from cfme import test_requirements
pytest_generate_tests = testgen.generate([InfraProvider], scope="function")
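# Parametrize provider-scoped tests in this module across the available infrastructure providers.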
@pytest.mark.tier(3)
@pytest.mark.sauce
@test_requirements.discovery
def test_empty_discovery_form_validation(appliance):
""" Tests that the flash message is correct when discovery form is empty."""
discover(None)
view = appliance.browser.create_view(InfraProvidersDiscoverView)
view.flash.assert_message('At least 1 item must be selected for discovery')
@pytest.mark.tier(3)
@pytest.mark.sauce
@test_requirements.provider_discovery
def test_discovery_cancelled_validation(appliance):
""" Tests that the flash message is correct when discovery is cancelled."""
discover(None, cancel=True)
view = appliance.browser.create_view(InfraProvidersView)
view.flash.assert_success_message('Infrastructure Providers '
'Discovery was cancelled by the user')
@pytest.mark.tier(3)
@pytest.mark.sauce
@test_requirements.provider_discovery
def test_add_cancelled_validation(appliance):
"""Tests that the flash message is correct when add is cancelled."""
prov = VMwareProvider()
prov.create(cancel=True)
view = appliance.browser.create_view(InfraProvidersView)
view.flash.assert_success_message('Add of Infrastructure Provider was cancelled by the user')
@pytest.mark.tier(3)
@pytest.mark.sauce
@test_requirements.provider_discovery
def test_type_required_validation():
"""Test to validate type while adding a provider"""
prov = InfraProvider()
with pytest.raises(AssertionError):
prov.create()
view = prov.create_view(InfraProviderAddView)
assert not view.add.active
@pytest.mark.tier(3)
@test_requirements.provider_discovery
def test_name_required_validation():
"""Tests to validate the name while adding a provider"""
endpoint = VirtualCenterEndpoint(hostname=fauxfactory.gen_alphanumeric(5))
prov = VMwareProvider(
name=None,
endpoints=endpoint)
with pytest.raises(AssertionError):
prov.create()
view = prov.create_view(InfraProviderAddView)
assert view.name.help_block == "Required"
assert not view.add.active
@pytest.mark.tier(3)
@test_requirements.provider_discovery
def test_host_name_required_validation():
"""Test to validate the hostname while adding a provider"""
endpoint = VirtualCenterEndpoint(hostname=None)
prov = VMwareProvider(
name=fauxfactory.gen_alphanumeric(5),
endpoints=endpoint)
with pytest.raises(AssertionError):
prov.create()
view = prov.create_view(prov.endpoints_form)
assert view.hostname.help_block == "Required"
view = prov.create_view(InfraProviderAddView)
assert not view.add.active
@pytest.mark.tier(3)
@pytest.mark.meta(blockers=[1209756])
@pytest.mark.uncollectif(lambda: version.current_version() > "5.4.0.0.24")
def test_ip_required_validation():
"""Test to validate the ip address while adding a provider"""
prov = VMwareProvider(
name=fauxfactory.gen_alphanumeric(5),
hostname=fauxfactory.gen_alphanumeric(5),
ip_address=None)
with error.expected("IP Address can't be blank"):
prov.create()
@pytest.mark.tier(3)
@test_requirements.provider_discovery
def test_name_max_character_validation(request, infra_provider):
"""Test to validate max character for name field"""
request.addfinalizer(lambda: infra_provider.delete_if_exists(cancel=False))
name = fauxfactory.gen_alphanumeric(255)
with update(infra_provider):
infra_provider.name = name
assert infra_provider.exists
@pytest.mark.tier(3)
@test_requirements.provider_discovery
def test_host_name_max_character_validation():
"""Test to validate max character for host name field"""
endpoint = VirtualCenterEndpoint(hostname=fauxfactory.gen_alphanumeric(256))
prov = VMwareProvider(name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint)
try:
prov.create()
except AssertionError:
view = prov.create_view(prov.endpoints_form)
assert view.hostname.value == prov.hostname[0:255]
@pytest.mark.tier(3)
@test_requirements.provider_discovery
def test_api_port_max_character_validation():
"""Test to validate max character for api port field"""
endpoint = RHEVMEndpoint(hostname=fauxfactory.gen_alphanumeric(5),
api_port=fauxfactory.gen_alphanumeric(16),
verify_tls=None,
ca_certs=None)
prov = RHEVMProvider(name=fauxfactory.gen_alphanumeric(5), endpoints=endpoint)
try:
prov.create()
except AssertionError:
view = prov.create_view(prov.endpoints_form)
text = view.default.api_port.value
assert text == prov.default_endpoint.api_port[0:15]
@pytest.mark.usefixtures('has_no_infra_providers')
@pytest.mark.tier(1)
@test_requirements.provider_discovery
def test_providers_discovery(request, provider):
"""Tests provider discovery
Metadata:
test_flag: crud
"""
provider.discover()
view = provider.create_view(InfraProvidersView)
view.flash.assert_success_message('Infrastructure Providers: Discovery successfully initiated')
request.addfinalizer(InfraProvider.clear_providers)
wait_for_a_provider()
@pytest.mark.uncollectif(lambda provider: provider.type == 'rhevm', 'blocker=1399622')
@pytest.mark.tier(3)
@pytest.mark.usefixtures('has_no_infra_providers')
@test_requirements.provider_discovery
def test_provider_add_with_bad_credentials(provider):
"""Tests provider add with bad credentials
Metadata:
test_flag: crud
"""
provider.default_endpoint.credentials = Credential(
principal='bad',
secret='reallybad',
verify_secret='reallybad'
)
if isinstance(provider, VMwareProvider):
error_msg = 'Cannot complete login due to an incorrect user name or password.'
elif isinstance(provider, RHEVMProvider):
error_msg = 'Credential validation was not successful: Incorrect user name or password.'
with error.expected(error_msg):
provider.create(validate_credentials=True)
@pytest.mark.usefixtures('has_no_infra_providers')
@pytest.mark.tier(1)
@test_requirements.provider_discovery
@pytest.mark.meta(blockers=[BZ(1450527, unblock=lambda provider: provider.type != 'scvmm')])
def test_provider_crud(provider):
"""Tests provider add with good credentials
Metadata:
test_flag: crud
"""
provider.create()
# Fails on upstream, all provider types - BZ1087476
provider.validate_stats(ui=True)
old_name = provider.name
with update(provider):
provider.name = str(uuid.uuid4()) # random uuid
with update(provider):
provider.name = old_name # old name
provider.delete(cancel=False)
provider.wait_for_delete()
class TestProvidersRESTAPI(object):
@pytest.yield_fixture(scope="function")
def custom_attributes(self, appliance, infra_provider):
provider = appliance.rest_api.collections.providers.get(name=infra_provider.name)
body = []
attrs_num = 2
for _ in range(attrs_num):
uid = fauxfactory.gen_alphanumeric(5)
body.append({
'name': 'ca_name_{}'.format(uid),
'value': 'ca_value_{}'.format(uid)
})
attrs = provider.custom_attributes.action.add(*body)
assert len(attrs) == attrs_num
yield attrs, provider
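        # teardown: remove any of the added attributes that still exist on the provider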
provider.custom_attributes.reload()
ids = [attr.id for attr in attrs]
delete_attrs = [attr for attr in provider.custom_attributes if attr.id in ids]
if delete_attrs:
provider.custom_attributes.action.delete(*delete_attrs)
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.tier(3)
@test_requirements.rest
def test_add_custom_attributes(self, appliance, custom_attributes):
"""Test adding custom attributes to provider using REST API.
Metadata:
test_flag: rest
"""
attributes, provider = custom_attributes
for attr in attributes:
record = provider.custom_attributes.get(id=attr.id)
assert appliance.rest_api.response.status_code == 200
assert record.name == attr.name
assert record.value == attr.value
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.tier(3)
@test_requirements.rest
def test_delete_custom_attributes_from_detail_post(self, appliance, custom_attributes):
"""Test deleting custom attributes from detail using POST method.
Metadata:
test_flag: rest
"""
attributes, _ = custom_attributes
for entity in attributes:
entity.action.delete.POST()
assert appliance.rest_api.response
with error.expected('ActiveRecord::RecordNotFound'):
entity.action.delete.POST()
assert appliance.rest_api.response.status_code == 404
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.meta(blockers=[BZ(1422596, forced_streams=['5.7', '5.8', 'upstream'])])
@pytest.mark.tier(3)
@test_requirements.rest
def test_delete_custom_attributes_from_detail_delete(self, appliance, custom_attributes):
"""Test deleting custom attributes from detail using DELETE method.
Metadata:
test_flag: rest
"""
attributes, _ = custom_attributes
for entity in attributes:
entity.action.delete.DELETE()
assert appliance.rest_api.response
with error.expected('ActiveRecord::RecordNotFound'):
entity.action.delete.DELETE()
assert appliance.rest_api.response.status_code == 404
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.tier(3)
@test_requirements.rest
def test_delete_custom_attributes_from_collection(self, appliance, custom_attributes):
"""Test deleting custom attributes from collection using REST API.
Metadata:
test_flag: rest
"""
attributes, provider = custom_attributes
provider.custom_attributes.action.delete(*attributes)
assert appliance.rest_api.response.status_code == 200
with error.expected('ActiveRecord::RecordNotFound'):
provider.custom_attributes.action.delete(*attributes)
assert appliance.rest_api.response.status_code == 404
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.tier(3)
@test_requirements.rest
def test_delete_single_custom_attribute_from_collection(self, appliance, custom_attributes):
"""Test deleting single custom attribute from collection using REST API.
Metadata:
test_flag: rest
"""
attributes, provider = custom_attributes
attribute = attributes[0]
provider.custom_attributes.action.delete(attribute)
assert appliance.rest_api.response.status_code == 200
with error.expected('ActiveRecord::RecordNotFound'):
provider.custom_attributes.action.delete(attribute)
assert appliance.rest_api.response.status_code == 404
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.tier(3)
@test_requirements.rest
@pytest.mark.parametrize('from_detail', [True, False], ids=['from_detail', 'from_collection'])
def test_edit_custom_attributes(self, appliance, custom_attributes, from_detail):
"""Test editing custom attributes using REST API.
Metadata:
test_flag: rest
"""
attributes, provider = custom_attributes
response_len = len(attributes)
body = []
for _ in range(response_len):
uid = fauxfactory.gen_alphanumeric(5)
body.append({
'name': 'ca_name_{}'.format(uid),
'value': 'ca_value_{}'.format(uid),
'section': 'metadata'
})
if from_detail:
edited = []
for i in range(response_len):
edited.append(attributes[i].action.edit(**body[i]))
assert appliance.rest_api.response.status_code == 200
else:
for i in range(response_len):
body[i].update(attributes[i]._ref_repr())
edited = provider.custom_attributes.action.edit(*body)
assert appliance.rest_api.response.status_code == 200
assert len(edited) == response_len
for i in range(response_len):
assert edited[i].name == body[i]['name']
assert edited[i].value == body[i]['value']
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.tier(3)
@test_requirements.rest
@pytest.mark.parametrize('from_detail', [True, False], ids=['from_detail', 'from_collection'])
def test_edit_custom_attributes_bad_section(self, appliance, custom_attributes, from_detail):
"""Test that editing custom attributes using REST API and adding invalid section fails.
Metadata:
test_flag: rest
"""
attributes, provider = custom_attributes
response_len = len(attributes)
body = []
for _ in range(response_len):
body.append({'section': 'bad_section'})
if from_detail:
for i in range(response_len):
with error.expected('Api::BadRequestError'):
attributes[i].action.edit(**body[i])
assert appliance.rest_api.response.status_code == 400
else:
for i in range(response_len):
body[i].update(attributes[i]._ref_repr())
with error.expected('Api::BadRequestError'):
provider.custom_attributes.action.edit(*body)
assert appliance.rest_api.response.status_code == 400
@pytest.mark.uncollectif(lambda: version.current_version() < '5.7')
@pytest.mark.tier(3)
@test_requirements.rest
def test_add_custom_attributes_bad_section(self, appliance, infra_provider):
"""Test that adding custom attributes with invalid section
to provider using REST API fails.
Metadata:
test_flag: rest
"""
provider = appliance.rest_api.collections.providers.get(name=infra_provider.name)
uid = fauxfactory.gen_alphanumeric(5)
body = {
'name': 'ca_name_{}'.format(uid),
'value': 'ca_value_{}'.format(uid),
'section': 'bad_section'
}
with error.expected('Api::BadRequestError'):
provider.custom_attributes.action.add(body)
assert appliance.rest_api.response.status_code == 400
| gpl-2.0 | -2,294,399,882,987,095,300 | 36.707022 | 99 | 0.66474 | false |
pyandi/Toolbox | send_mail.py | 1 | 2398 | #!/usr/bin/env python2
# encoding: utf-8
"""Send Mail.
Usage:
./send_mail.py -t <M_TO> [--cc <M_CC>] [--bcc <M_BCC>] [-s <SUBJECT>] [-m <MESSAGE>] [-a <ATTACHMENTS>]
./send_mail.py -c | --config
./send_mail.py -h | --help
./send_mail.py --version
Options:
-c --config Config SMTP server.
    -t <M_TO>, --to=<M_TO>             Recipient address; multiple addresses are separated by semicolons (';').
    --cc=<M_CC>                        CC addresses, separated by semicolons (';').
    --bcc=<M_BCC>                      BCC addresses, separated by semicolons (';').
    -s <SUBJECT>, --subject=<SUBJECT>  Mail subject.
    -m <MESSAGE>, --message=<MESSAGE>  Mail content.
    -a <ATTACH>, --attach=<ATTACH>     Add attachments, separated by semicolons (';').
-h --help Show this screen.
--version Show version.
"""
import os
import json
from docopt import docopt
from sender import Mail, Message
CONF = "smtp.json"
def run(arguments):
if arguments['--config'] or (not os.path.isfile(CONF)):
conf()
with open(CONF, 'rb') as f:
smtp = json.loads(f.read())
mail = Mail(host=smtp['server'],
username=smtp['user'],
password=smtp['password'],
port=int(smtp['port']),
fromaddr=smtp['from'])
msg = Message(arguments['--subject'],
fromaddr=smtp['from'],
body=arguments['--message'])
to = arguments['--to']
if to:
msg.to = to.split(';')
cc = arguments['--cc']
if cc:
msg.cc = cc.split(';')
bcc = arguments['--bcc']
if bcc:
msg.bcc = bcc.split(';')
atta = arguments['--attach']
if atta:
msg.attach(atta.split(';'))
mail.send(msg)
def conf():
print('SMTP SERVER CONFIG'.center(80, '*'))
conf = dict()
conf['server'] = raw_input('SMTP SERVER ADDRESS: ')
conf['port'] = raw_input('SMTP SERVER PORT: ')
conf['user'] = raw_input('USER: ')
conf['password'] = raw_input('PASSWORD: ')
conf['from'] = raw_input('FROM ADDRESS: ')
if not conf['from']:
conf['from'] = conf['user']
with open(CONF, 'wb') as f:
f.write(json.dumps(conf))
if __name__ == '__main__':
arguments = docopt(__doc__, version='Version: 0.1.1rc')
run(arguments)
| mit | 8,419,235,846,333,373,000 | 31.849315 | 107 | 0.524187 | false |
google/OctoPrint-HeaterTimeout | setup.py | 1 | 1988 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# coding=utf-8
plugin_identifier = "HeaterTimeout"
plugin_package = "octoprint_HeaterTimeout"
plugin_name = "OctoPrint-HeaterTimeout"
plugin_version = "0.0.3"
plugin_description = """OctoPrint plugin to shut down heaters after an idle timeout"""
plugin_author = "Uriah Welcome"
plugin_author_email = "[email protected]"
plugin_url = "https://github.com/google/OctoPrint-HeaterTimeout"
plugin_license = "Apache"
plugin_additional_data = []
plugin_additional_packages = []
plugin_ignored_packages = []
additional_setup_parameters = {}
from setuptools import setup
try:
import octoprint_setuptools
except:
print("Could not import OctoPrint's setuptools, are you sure you are running that under "
"the same python installation that OctoPrint is installed under?")
import sys
sys.exit(-1)
setup_parameters = octoprint_setuptools.create_plugin_setup_parameters(
identifier=plugin_identifier,
package=plugin_package,
name=plugin_name,
version=plugin_version,
description=plugin_description,
author=plugin_author,
mail=plugin_author_email,
url=plugin_url,
license=plugin_license,
additional_packages=plugin_additional_packages,
ignored_packages=plugin_ignored_packages,
additional_data=plugin_additional_data
)
if len(additional_setup_parameters):
from octoprint.util import dict_merge
setup_parameters = dict_merge(setup_parameters, additional_setup_parameters)
setup(**setup_parameters)
| apache-2.0 | -9,215,495,089,896,338,000 | 31.590164 | 90 | 0.775151 | false |
rhinstaller/python-simpleline | simpleline/errors.py | 1 | 1120 | # Base exceptions for the Simpleline application.
#
# Base class for Simpleline Text UI framework.
#
# This file is part of Simpleline Text UI library.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Simpleline is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Simpleline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Simpleline. If not, see <https://www.gnu.org/licenses/>.
#
# This can't be moved to __init__.py because of cyclic import errors.
#
class SimplelineError(Exception):
"""Base exception for all other exceptions."""
class NothingScheduledError(SimplelineError):
"""Exception when running the loop with no screens scheduled."""
| gpl-2.0 | -7,083,077,802,950,792,000 | 35.129032 | 77 | 0.754464 | false |
deepmind/android_env | android_env/components/setup_step_interpreter_test.py | 1 | 14263 | # coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for android_env.components.setup_step_interpreter."""
from absl.testing import absltest
from android_env.components import adb_controller
from android_env.components import errors
from android_env.components import logcat_thread
from android_env.components import setup_step_interpreter
from android_env.proto import task_pb2
import mock
from google.protobuf import text_format
def _to_proto(proto_class, text):
proto = proto_class()
text_format.Parse(text, proto)
return proto
class SetupStepInterpreterTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.logcat = mock.create_autospec(logcat_thread.LogcatThread)
self.adb_controller = mock.create_autospec(adb_controller.AdbController)
def test_empty_setup_steps(self):
"""Simple test where nothing should break, and nothing should be done.
The test simply expects this test to not crash.
"""
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([])
def test_none_setup_steps(self):
"""Simple test where nothing should break, and nothing should be done.
The test simply expects this test to not crash.
"""
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
# Empty setup steps should be ignored.
interpreter.interpret([None])
def test_invalid_setup_step(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
    # An invalid (empty) setup step should raise an error.
with self.assertRaises(AssertionError):
interpreter.interpret([_to_proto(task_pb2.SetupStep, '')])
def test_adb_install_apk_filesystem(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
adb_call: {
install_apk: {
filesystem: {
path: "/my/favorite/dir/my_apk.apk"
}
}
}""")
])
self.adb_controller.install_apk.assert_called_once_with(
'/my/favorite/dir/my_apk.apk')
def test_adb_force_stop(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
adb_call: { force_stop: { package_name: "my.app.Activity" } }""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.force_stop.assert_called_once_with('my.app.Activity')
def test_adb_clear_cache(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
adb_call: { clear_cache: { package_name: "my.app.Activity" } }""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.clear_cache.assert_called_once_with('my.app.Activity')
def test_adb_grant_permissions(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
adb_call: {
grant_permissions: {
package_name: "my.app.Activity"
permissions: [ "my.namespace.READ_DATA", "another.namespace.WRITE" ]
}
}""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.grant_permissions.assert_called_once_with(
'my.app.Activity',
['my.namespace.READ_DATA', 'another.namespace.WRITE'])
def test_adb_start_activity(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
adb_call: {
start_activity: {
full_activity: "my.app.Activity"
extra_args: "arg1"
extra_args: "arg2"
}
}""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.start_activity.assert_called_once_with(
'my.app.Activity', ['arg1', 'arg2'])
def test_adb_single_tap(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(task_pb2.SetupStep, """
adb_call: {
tap: {
x: 321
y: 654
}
}""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.input_tap.assert_called_once_with(321, 654)
def test_adb_rotate(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
# Check landscape.
interpreter.interpret([
_to_proto(task_pb2.SetupStep,
""" adb_call: { rotate: { orientation: LANDSCAPE_90 } }""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.rotate_device.assert_called_once_with(
task_pb2.AdbCall.Rotate.Orientation.LANDSCAPE_90)
self.adb_controller.reset_mock()
# Check portrait.
interpreter.interpret([
_to_proto(task_pb2.SetupStep,
""" adb_call: { rotate: { orientation: PORTRAIT_0 } }""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.rotate_device.assert_called_once_with(
task_pb2.AdbCall.Rotate.Orientation.PORTRAIT_0)
self.adb_controller.reset_mock()
# Check landscape inverted.
interpreter.interpret([
_to_proto(task_pb2.SetupStep,
""" adb_call: { rotate: { orientation: LANDSCAPE_270} }""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.rotate_device.assert_called_once_with(
task_pb2.AdbCall.Rotate.Orientation.LANDSCAPE_270)
self.adb_controller.reset_mock()
# Check portrait up-side-down.
interpreter.interpret([
_to_proto(task_pb2.SetupStep,
""" adb_call: { rotate: { orientation: PORTRAIT_180 } }""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.rotate_device.assert_called_once_with(
task_pb2.AdbCall.Rotate.Orientation.PORTRAIT_180)
def test_adb_press_button(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(task_pb2.SetupStep,
""" adb_call: { press_button: { button: HOME } }""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.input_key.assert_called_once_with('KEYCODE_HOME')
self.adb_controller.reset_mock()
interpreter.interpret([
_to_proto(task_pb2.SetupStep,
""" adb_call: { press_button: { button: BACK } }""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.input_key.assert_called_once_with('KEYCODE_BACK')
def test_adb_start_accessibility_service(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
adb_call: {
start_accessibility_service: {
full_service: "my.app.AccessibilityService"
}
}""")
])
# AdbController should be called exactly once with the following arguments.
self.adb_controller.start_accessibility_service.assert_called_once_with(
'my.app.AccessibilityService')
def test_adb_start_screen_pinning(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
adb_call: {
start_screen_pinning: {
full_activity: "my.app.HighlanderApp" # "There can be only one".
}
}""")
])
# AdbController should be called once with the following arguments.
self.adb_controller.start_screen_pinning.assert_called_with(
u'my.app.HighlanderApp')
@mock.patch('time.sleep')
def test_time_sleep(self, mock_sleep):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret(
[_to_proto(task_pb2.SetupStep, """sleep: { time_sec: 0.875 }""")])
assert mock_sleep.call_count == 2
mock_sleep.assert_has_calls([mock.call(0.875), mock.call(0.5)])
@mock.patch('time.sleep')
def test_wait_for_app_screen_empty_activity(self, unused_mock_sleep):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
with self.assertRaises(errors.StepCommandError):
interpreter.interpret([
_to_proto(task_pb2.SetupStep,
"""success_condition: {wait_for_app_screen: { }}""")
])
def test_wait_for_message_fail(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
self.assertRaises(errors.StepCommandError, interpreter.interpret, [
_to_proto(
task_pb2.SetupStep, """
success_condition: {
wait_for_message: {
message:'foo'
timeout_sec: 0.0001
}
}
""")
])
def test_wait_for_message_success(self):
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
    # Replace `LogcatThread.add_event_listener` with one that simply calls the
    # listener's `handler_fn` right away with dummy event data.
def mock_add_ev_listener(event_listener):
event_listener.handler_fn('some_event', 'some_match')
self.logcat.add_event_listener.side_effect = mock_add_ev_listener
# The test checks that this command raises no AssertionError.
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
success_condition: {
wait_for_message: {
message:'foo'
timeout_sec: 1.0
}
}
""")
])
@mock.patch('time.sleep')
def test_check_install_not_installed(self, unused_mock_sleep):
self.adb_controller.is_package_installed.return_value = False
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
with self.assertRaises(errors.StepCommandError):
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
success_condition: {
check_install: {
package_name: "faz"
timeout_sec: 0.0001
}
}
""")
])
def test_check_install_installed(self):
self.adb_controller.is_package_installed.return_value = True
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
# The test checks that this command raises no AssertionError.
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
success_condition: {
check_install: {
package_name: "baz"
timeout_sec: 0.0001
}
}""")
])
self.adb_controller.is_package_installed.assert_called_once_with('baz')
def test_num_retries_failure(self):
self.adb_controller.is_package_installed.side_effect = [False] * 3
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
with self.assertRaises(errors.StepCommandError):
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
success_condition: {
check_install: {
package_name: "faz"
timeout_sec: 0.0001
}
num_retries: 3
}""")
])
    # With num_retries=3 the check is attempted 3 times in total.
self.assertEqual(3, self.adb_controller.is_package_installed.call_count)
@mock.patch('time.sleep')
def test_num_retries_success(self, unused_mock_sleep):
self.adb_controller.is_package_installed.side_effect = [
False, False, True, False
]
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
success_condition: {
check_install: {
package_name: "bar"
timeout_sec: 0.0001
}
num_retries: 5
}""")
])
# The check should succeed on the third try.
self.assertEqual(3, self.adb_controller.is_package_installed.call_count)
def test_retry_step(self):
self.adb_controller.is_package_installed.side_effect = [False, True]
interpreter = setup_step_interpreter.SetupStepInterpreter(
adb_controller=self.adb_controller, logcat=self.logcat)
interpreter.interpret([
_to_proto(
task_pb2.SetupStep, """
adb_call: { press_button: { button: HOME } }
success_condition: {
check_install: {
package_name: "bar"
timeout_sec: 0.0001
}
num_retries: 2
}""")
])
    # We expect the check to fail on the first pass and succeed on the second.
self.adb_controller.input_key.assert_has_calls(
[mock.call('KEYCODE_HOME')] * 2)
self.assertEqual(2, self.adb_controller.is_package_installed.call_count)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 3,202,464,094,311,081,000 | 34.130542 | 79 | 0.682465 | false |
ARISE-Initiative/robosuite | tests/test_environments/test_action_playback.py | 1 | 1896 | """
Test script for recording a sequence of random actions and playing them back
"""
import os
import h5py
import argparse
import random
import numpy as np
import json
import robosuite
from robosuite.controllers import load_controller_config
def test_playback():
# set seeds
random.seed(0)
np.random.seed(0)
env = robosuite.make(
"Lift",
robots=["Panda"],
controller_configs=load_controller_config(default_controller="OSC_POSE"),
has_renderer=False,
has_offscreen_renderer=False,
ignore_done=True,
use_camera_obs=False,
reward_shaping=True,
control_freq=20,
)
env.reset()
# task instance
task_xml = env.sim.model.get_xml()
task_init_state = np.array(env.sim.get_state().flatten())
# trick for ensuring that we can play MuJoCo demonstrations back
# deterministically by using the recorded actions open loop
env.reset_from_xml_string(task_xml)
env.sim.reset()
env.sim.set_state_from_flattened(task_init_state)
env.sim.forward()
# random actions to play
n_actions = 100
actions = 0.1 * np.random.uniform(low=-1., high=1., size=(n_actions, env.action_spec[0].shape[0]))
# play actions
print("playing random actions...")
states = [task_init_state]
for i in range(n_actions):
env.step(actions[i])
states.append(np.array(env.sim.get_state().flatten()))
# try playback
print("attempting playback...")
env.reset()
env.reset_from_xml_string(task_xml)
env.sim.reset()
env.sim.set_state_from_flattened(task_init_state)
env.sim.forward()
for i in range(n_actions):
env.step(actions[i])
state_playback = env.sim.get_state().flatten()
assert(np.all(np.equal(states[i + 1], state_playback)))
print("test passed!")
if __name__ == "__main__":
test_playback()
| mit | 6,415,794,409,736,424,000 | 24.621622 | 102 | 0.643987 | false |
nismod/energy_demand | energy_demand/wrapper_model.py | 1 | 15386 | """All necessary calculations in pre_simulate()
"""
import os
import logging
from energy_demand.read_write import data_loader
from energy_demand.basic import basic_functions
from energy_demand.scripts import init_scripts
from energy_demand.read_write import write_data
from energy_demand.assumptions import general_assumptions
from energy_demand.validation import lad_validation
from energy_demand.scripts import s_disaggregation
from energy_demand.basic import demand_supply_interaction
from energy_demand.read_write import read_data
from energy_demand.technologies import tech_related
from energy_demand.scripts import s_scenario_param
from energy_demand.geography import weather_region
from energy_demand.basic import testing_functions
def load_data_before_simulation(
data,
sim_yrs,
config,
curr_yr
):
# ---------
# Configuration
# -----------
base_yr = config['CONFIG']['base_yr']
weather_yr_scenario = config['CONFIG']['weather_yr_scenario']
path_new_scenario = config['PATHS']['path_new_scenario']
data['data_path'] = os.path.normpath(config['PATHS']['path_local_data'])
data['processed_path'] = os.path.normpath(config['PATHS']['path_processed_data'])
data['result_path'] = os.path.normpath(config['PATHS']['path_result_data'])
data['paths'] = config['CONFIG_DATA']
# Downloaded (FTP) data
data['local_paths'] = config['DATA_PATHS']
# ------------------------------------------------
# Load Inputs
# ------------------------------------------------
data['enduses'], data['sectors'], data['fuels'], lookup_enduses, \
lookup_sector_enduses = data_loader.load_fuels(config['CONFIG_DATA'])
# ------------------------------------------------
# Load Assumptions
# ------------------------------------------------
data['assumptions'] = general_assumptions.Assumptions(
lookup_enduses=lookup_enduses,
lookup_sector_enduses=lookup_sector_enduses,
base_yr=base_yr,
weather_by=config['CONFIG']['user_defined_weather_by'],
simulation_end_yr=config['CONFIG']['user_defined_simulation_end_yr'],
curr_yr=curr_yr,
sim_yrs=sim_yrs,
paths=config['CONFIG_DATA'],
enduses=data['enduses'],
sectors=data['sectors'],
reg_nrs=len(data['regions']))
# ------------------------------------------
# Make selection of regions to model
# ------------------------------------------
if config['CRITERIA']['reg_selection']:
region_selection = read_data.get_region_selection(
os.path.join(config['DATA_PATHS']['local_path_datafolder'],
"region_definitions",
config['CRITERIA']['reg_selection_csv_name']))
#region_selection = ['E02003237', 'E02003238']
setattr(data['assumptions'], 'reg_nrs', len(region_selection))
else:
region_selection = data['regions']
# Create .ini file with simulation parameter
write_data.write_simulation_inifile(
path_new_scenario, data, region_selection)
# -------------------------------------------
# Weather year specific initialisations
# -------------------------------------------
path_folder_weather_yr = os.path.join(
os.path.join(path_new_scenario, str(weather_yr_scenario) + "__" + "all_stations"))
data['weather_result_paths'] = basic_functions.get_result_paths(path_folder_weather_yr)
folders_to_create = [
path_folder_weather_yr,
data['weather_result_paths']['data_results'],
data['weather_result_paths']['data_results_PDF'],
data['weather_result_paths']['data_results_validation'],
data['weather_result_paths']['data_results_model_run_results_txt']]
for folder in folders_to_create:
basic_functions.create_folder(folder)
# ------------------------------------------------
# Load load profiles of technologies
# ------------------------------------------------
data['tech_lp'] = data_loader.load_data_profiles(
config['CONFIG_DATA'],
config['DATA_PATHS'],
data['assumptions'].model_yeardays,
data['assumptions'].model_yeardays_daytype)
# Obtain population data for disaggregation
if config['CRITERIA']['msoa_crit']:
name_population_dataset = config['DATA_PATHS']['path_population_data_for_disaggregation_msoa']
else:
name_population_dataset = config['DATA_PATHS']['path_population_data_for_disaggregation_lad']
data['pop_for_disag'] = data_loader.read_scenario_data(
name_population_dataset,
region_name='region',
value_name='value')
# ------------------------------------------------
# Load building related data
# ------------------------------------------------
if config['CRITERIA']['virtual_building_stock_criteria']:
data['scenario_data']['floor_area']['rs_floorarea'], data['scenario_data']['floor_area']['ss_floorarea'], data['service_building_count'], rs_regions_without_floorarea, ss_regions_without_floorarea = data_loader.floor_area_virtual_dw(
data['regions'],
data['sectors'],
config['DATA_PATHS'],
data['scenario_data']['population'][data['assumptions'].base_yr],
base_yr=data['assumptions'].base_yr)
# Add all areas with no floor area data
data['assumptions'].update("rs_regions_without_floorarea", rs_regions_without_floorarea)
data['assumptions'].update("ss_regions_without_floorarea", ss_regions_without_floorarea)
else:
# ------------------------------------------------
# Load floor area directly from scenario
# ------------------------------------------------
data['scenario_data']['floor_area']['rs_floorarea'] = {}
data['scenario_data']['floor_area']['rs_floorarea'] = data['scenario_data']['rs_floorarea']
data['scenario_data']['floor_area']['ss_floorarea'] = data['scenario_data']['ss_floorarea']
data['scenario_data']['service_building_count'][data['assumptions'].base_yr] = {}
return data
def before_simulation(
data,
config,
sim_yrs,
pop_density,
service_switches,
fuel_switches,
capacity_switches
):
"""
"""
# ------------------------------------------------
# Disaggregate national energy demand to regional demands
# ------------------------------------------------
fuel_disagg = s_disaggregation.disaggr_demand(
data,
config['CRITERIA']['crit_temp_min_max'],
spatial_calibration=config['CRITERIA']['spatial_calibration'])
# ------------------------------------------------
# Calculate spatial diffusion factors
#
    # Here the real values used for the spatial disaggregation (speed_con_max)
    # need to be defined. If population density is not used,
    # it needs to be replaced by other values which are loaded from
    # a csv file in the form of: {region_name: value}
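    # Illustrative only (assumed region codes and values): such a file would
    # yield a mapping like {'E02003237': 35.2, 'E02003238': 12.8}.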
# ------------------------------------------------
f_reg, f_reg_norm, f_reg_norm_abs, crit_all_the_same = init_scripts.create_spatial_diffusion_factors(
narrative_spatial_explicit_diffusion=data['assumptions'].strategy_vars['spatial_explicit_diffusion'],
fuel_disagg=fuel_disagg,
regions=data['regions'],
real_values=pop_density,
narrative_speed_con_max=data['assumptions'].strategy_vars['speed_con_max'])
# ------------------------------------------------
# Calculate parameter values for every region
# ------------------------------------------------
regional_vars = init_scripts.spatial_explicit_modelling_strategy_vars(
data['assumptions'].strategy_vars,
data['assumptions'].spatially_modelled_vars,
data['regions'],
fuel_disagg,
f_reg,
f_reg_norm,
f_reg_norm_abs)
data['assumptions'].update('strategy_vars', regional_vars)
# ------------------------------------------------
# Calculate parameter values for every simulated year
# based on narratives. Also calculate annual parameters for
# technologies diffused by switches.
# ------------------------------------------------
regional_vars, non_regional_vars = s_scenario_param.generate_annual_param_vals(
data['regions'],
data['assumptions'].strategy_vars,
sim_yrs)
# ------------------------------------------------
# Switches calculations
# ------------------------------------------------
# Update assumptions
crit_switch_happening = testing_functions.switch_testing(
fuel_switches=fuel_switches,
service_switches=service_switches,
capacity_switches=capacity_switches)
setattr(data['assumptions'],'crit_switch_happening', crit_switch_happening)
annual_tech_diff_params = init_scripts.switch_calculations(
sim_yrs,
data,
f_reg,
f_reg_norm,
f_reg_norm_abs,
crit_all_the_same,
service_switches=service_switches,
fuel_switches=fuel_switches,
capacity_switches=capacity_switches)
for region in data['regions']:
regional_vars[region]['annual_tech_diff_params'] = annual_tech_diff_params[region]
return regional_vars, non_regional_vars, fuel_disagg, crit_switch_happening
def write_user_defined_results(
criterias,
result_paths,
sim_obj,
data,
curr_yr,
region_selection,
pop_array_cy
):
"""
Write annual results to files
"""
logging.info("... Start writing results to file")
if criterias['write_txt_additional_results']:
# Write population data to file
write_data.write_scenaric_population_data(
curr_yr,
result_paths['data_results_model_run_pop'],
pop_array_cy)
# Write full results (Note: Results in very large data written to file)
##write_data.write_full_results(
## data_handle.current_timestep,
## os.path.join(result_path, 'model_run_results_txt'),
## sim_obj.ed_enduse_fueltype_regs_yh,
## "out_enduse_specific")
write_data.write_supply_results(
curr_yr,
"ed_fueltype_regs_yh",
result_paths['data_results_model_run_pop'],
sim_obj.ed_fueltype_regs_yh,
"result_tot_submodels_fueltypes")
write_data.write_enduse_specific(
curr_yr,
result_paths['data_results_model_run_results_txt'],
sim_obj.tot_fuel_y_enduse_specific_yh,
"out_enduse_specific")
write_data.write_lf(
result_paths['data_results_model_run_results_txt'],
"result_reg_load_factor_y",
[curr_yr], sim_obj.reg_load_factor_y, 'reg_load_factor_y')
write_data.write_lf(
result_paths['data_results_model_run_results_txt'],
"result_reg_load_factor_yd",
[curr_yr], sim_obj.reg_load_factor_yd, 'reg_load_factor_yd')
# ----------------------------------------------------------------------------------------
# Write out national demand for every fueltype (used for first sending of demand data)
# ----------------------------------------------------------------------------------------
if criterias['write_out_national']:
# Write out gas
demand_supply_interaction.write_national_results(
path_folder=result_paths['data_results'],
results_unconstrained=sim_obj.results_unconstrained,
enduse_specific_results=sim_obj.tot_fuel_y_enduse_specific_yh,
fueltype_str='gas',
fuelype_nr=tech_related.get_fueltype_int('gas'),
year=curr_yr,
submodels_names=data['assumptions'].submodels_names)
# Write out elec
demand_supply_interaction.write_national_results(
path_folder=result_paths['data_results'],
results_unconstrained=sim_obj.results_unconstrained,
enduse_specific_results=sim_obj.tot_fuel_y_enduse_specific_yh,
fueltype_str='electricity',
fuelype_nr=tech_related.get_fueltype_int('electricity'),
year=curr_yr,
submodels_names=data['assumptions'].submodels_names)
# ------------------------------------------------
# Temporal Validation
# ------------------------------------------------
if (criterias['validation_criteria'] == True) and (
            curr_yr == data['assumptions'].base_yr) and (criterias['cluster_calc'] != True):
lad_validation.spatio_temporal_val(
sim_obj.ed_fueltype_national_yh,
sim_obj.ed_fueltype_regs_yh,
result_paths,
data['paths'],
region_selection,
data['assumptions'].seasons,
data['assumptions'].model_yeardays_daytype,
plot_crit=False)
def load_gva_sector(
data_handle,
regions,
sectors_to_load,
msoa_crit,
simulate=False
):
"""Load sector specific GVA
Arguments
---------
data_handle : object
Data handler
pop_array : array
Population
regions : list
Regions
sectors_to_load : list
Sectors which are loaded
    msoa_crit : bool
        Criterion whether modelled on MSOA level or LAD level
    simulate : bool
        Criterion whether run from simulate() or not
"""
sector_data = {}
if msoa_crit:
logging.info("Don't load sector GVA {}")
else:
for gva_sector_nr in sectors_to_load:
try:
logging.info("... Loading GVA data for sector_Nr {}".format(gva_sector_nr))
if simulate:
gva_sector_data = data_handle.get_data(
'gva_per_head_sector__{}'.format(gva_sector_nr))
else:
gva_sector_data = data_handle.get_base_timestep_data(
'gva_per_head_sector__{}'.format(gva_sector_nr))
sector_data[gva_sector_nr] = basic_functions.assign_array_to_dict(
gva_sector_data, regions)
            except KeyError:
                # No data could be loaded for this sector
                raise Exception(
                    "Could not load data 'gva_per_head_sector__{}'".format(gva_sector_nr))
return sector_data
def plots(
data,
curr_yr,
fuel_disagg,
config
):
"""
"""
# Spatial validation
if (config['CRITERIA']['validation_criteria'] == True) and (
curr_yr == data['assumptions'].base_yr) and (
config['CRITERIA']['cluster_calc'] != True):
lad_validation.spatial_validation_lad_level(
fuel_disagg,
data['result_paths'],
data['paths'],
data['regions'],
data['reg_coord'],
plot_crit=False)
# Plot map with weather station
if config['CRITERIA']['cluster_calc'] != True:
data_loader.create_weather_station_map(
data['weather_stations'],
os.path.join(data['result_path'], 'weatherst_distr_weathyr_{}.pdf'.format(
config['CONFIG']['weather_yr_scenario'])),
path_shapefile=config['DATA_PATHS']['lad_shapefile'])
| mit | 147,847,713,708,952,770 | 38.860104 | 241 | 0.559145 | false |
mamontov-cpp/dukpp-03 | duktape/tools/extract_caseconv.py | 6 | 25764 | #!/usr/bin/env python2
#
# Extract rules for Unicode case conversion, specifically the behavior
# required by ECMAScript E5 in Sections 15.5.4.16 to 15.5.4.19. The
# bitstream encoded rules are used for the slow path at run time, so
# compactness is favored over speed.
#
# There is no support for context or locale sensitive rules, as they
# are handled directly in C code before consulting tables generated
# here. ECMAScript requires case conversion both with and without
# locale/language specific rules (e.g. String.prototype.toLowerCase()
# and String.prototype.toLocaleLowerCase()), so they are best handled
# in C anyway.
#
# Case conversion rules for ASCII are also excluded as they are handled
# by the C fast path. Rules for non-BMP characters (codepoints above
# U+FFFF) are omitted as they're not required for standard ECMAScript.
#
import os
import sys
import re
import math
import optparse
import dukutil
class UnicodeData:
"""Read UnicodeData.txt into an internal representation."""
def __init__(self, filename):
self.data = self.read_unicode_data(filename)
print('read %d unicode data entries' % len(self.data))
def read_unicode_data(self, filename):
res = []
f = open(filename, 'rb')
for line in f:
if line.startswith('#'):
continue
line = line.strip()
if line == '':
continue
parts = line.split(';')
if len(parts) != 15:
raise Exception('invalid unicode data line')
res.append(parts)
f.close()
# Sort based on Unicode codepoint.
def mycmp(a,b):
return cmp(long(a[0], 16), long(b[0], 16))
res.sort(cmp=mycmp)
return res
class SpecialCasing:
"""Read SpecialCasing.txt into an internal representation."""
def __init__(self, filename):
self.data = self.read_special_casing_data(filename)
print('read %d special casing entries' % len(self.data))
def read_special_casing_data(self, filename):
res = []
f = open(filename, 'rb')
for line in f:
try:
idx = line.index('#')
line = line[:idx]
except ValueError:
pass
line = line.strip()
if line == '':
continue
parts = line.split(';')
parts = [i.strip() for i in parts]
while len(parts) < 6:
parts.append('')
res.append(parts)
f.close()
return res
def parse_unicode_sequence(x):
"""Parse a Unicode sequence like ABCD 1234 into a unicode string."""
res = ''
for i in x.split(' '):
i = i.strip()
if i == '':
continue
res += unichr(long(i, 16))
return res
def get_base_conversion_maps(unicode_data):
"""Create case conversion tables without handling special casing yet."""
uc = {} # uppercase, codepoint (number) -> string
lc = {} # lowercase
tc = {} # titlecase
for x in unicode_data.data:
c1 = long(x[0], 16)
# just 16-bit support needed
if c1 >= 0x10000:
continue
if x[12] != '':
# field 12: simple uppercase mapping
c2 = parse_unicode_sequence(x[12])
uc[c1] = c2
tc[c1] = c2 # titlecase default == uppercase, overridden below if necessary
if x[13] != '':
# field 13: simple lowercase mapping
c2 = parse_unicode_sequence(x[13])
lc[c1] = c2
if x[14] != '':
# field 14: simple titlecase mapping
c2 = parse_unicode_sequence(x[14])
tc[c1] = c2
return uc, lc, tc
def update_special_casings(uc, lc, tc, special_casing):
"""Update case conversion tables with special case conversion rules."""
for x in special_casing.data:
c1 = long(x[0], 16)
if x[4] != '':
# conditions
continue
lower = parse_unicode_sequence(x[1])
title = parse_unicode_sequence(x[2])
upper = parse_unicode_sequence(x[3])
if len(lower) > 1:
lc[c1] = lower
if len(upper) > 1:
uc[c1] = upper
if len(title) > 1:
tc[c1] = title
print('- special case: %d %d %d' % (len(lower), len(upper), len(title)))
def remove_ascii_part(convmap):
"""Remove ASCII case conversion parts (handled by C fast path)."""
for i in xrange(128):
if convmap.has_key(i):
del convmap[i]
def scan_range_with_skip(convmap, start_idx, skip):
"""Scan for a range of continuous case conversion with a certain 'skip'."""
conv_i = start_idx
if not convmap.has_key(conv_i):
return None, None, None
elif len(convmap[conv_i]) > 1:
return None, None, None
else:
conv_o = ord(convmap[conv_i])
start_i = conv_i
start_o = conv_o
while True:
new_i = conv_i + skip
new_o = conv_o + skip
if not convmap.has_key(new_i):
break
if len(convmap[new_i]) > 1:
break
if ord(convmap[new_i]) != new_o:
break
conv_i = new_i
conv_o = new_o
# [start_i,conv_i] maps to [start_o,conv_o], ignore ranges of 1 char.
count = (conv_i - start_i) / skip + 1
if count <= 1:
return None, None, None
# We have an acceptable range, remove them from the convmap here.
for i in xrange(start_i, conv_i + skip, skip):
del convmap[i]
return start_i, start_o, count
def find_first_range_with_skip(convmap, skip):
"""Find first range with a certain 'skip' value."""
for i in xrange(65536):
start_i, start_o, count = scan_range_with_skip(convmap, i, skip)
if start_i is None:
continue
return start_i, start_o, count
return None, None, None
def generate_caseconv_tables(convmap):
"""Generate bit-packed case conversion table for a given conversion map."""
# The bitstream encoding is based on manual inspection for whatever
# regularity the Unicode case conversion rules have.
#
# Start with a full description of case conversions which does not
# cover all codepoints; unmapped codepoints convert to themselves.
# Scan for range-to-range mappings with a range of skips starting from 1.
# Whenever a valid range is found, remove it from the map. Finally,
# output the remaining case conversions (1:1 and 1:n) on a per codepoint
# basis.
#
    # This is very slow because we always scan from scratch, but it's the
    # most reliable and simple way to scan.
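    #
    # Sketch of the bitstream layout emitted below (derived from the BitEncoder
    # calls in this function):
    #   for each skip 1..6:  [count:6] then count * ([start_i:16][start_o:16][r_count:7])
    #   [0x3f:6]             terminator after the last skip section
    #   [n_singles:7] then n_singles * ([cp_in:16][cp_out:16])
    #   [n_multis:7]  then n_multis  * ([cp_in:16][len:2][len * [cp:16]])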
print('generate caseconv tables')
ranges = [] # range mappings (2 or more consecutive mappings with a certain skip)
singles = [] # 1:1 character mappings
multis = [] # 1:n character mappings
# Ranges with skips
for skip in xrange(1,6+1): # skips 1...6 are useful
while True:
start_i, start_o, count = find_first_range_with_skip(convmap, skip)
if start_i is None:
break
print('- skip %d: %d %d %d' % (skip, start_i, start_o, count))
ranges.append([start_i, start_o, count, skip])
# 1:1 conversions
k = convmap.keys()
k.sort()
for i in k:
if len(convmap[i]) > 1:
continue
singles.append([i, ord(convmap[i])]) # codepoint, codepoint
del convmap[i]
# There are many mappings to 2-char sequences with latter char being U+0399.
# These could be handled as a special case, but we don't do that right now.
#
# [8064L, u'\u1f08\u0399']
# [8065L, u'\u1f09\u0399']
# [8066L, u'\u1f0a\u0399']
# [8067L, u'\u1f0b\u0399']
# [8068L, u'\u1f0c\u0399']
# [8069L, u'\u1f0d\u0399']
# [8070L, u'\u1f0e\u0399']
# [8071L, u'\u1f0f\u0399']
# ...
#
# tmp = {}
# k = convmap.keys()
# k.sort()
# for i in k:
# if len(convmap[i]) == 2 and convmap[i][1] == u'\u0399':
# tmp[i] = convmap[i][0]
# del convmap[i]
# print(repr(tmp))
#
# skip = 1
# while True:
# start_i, start_o, count = find_first_range_with_skip(tmp, skip)
# if start_i is None:
# break
# print('- special399, skip %d: %d %d %d' % (skip, start_i, start_o, count))
# print(len(tmp.keys()))
# print(repr(tmp))
# XXX: need to put 12 remaining mappings back to convmap
# 1:n conversions
k = convmap.keys()
k.sort()
for i in k:
multis.append([i, convmap[i]]) # codepoint, string
del convmap[i]
    for t in singles:
        print('- singles: ' + repr(t))
    for t in multis:
        print('- multis: ' + repr(t))
    print('- range mappings: %d' % len(ranges))
    print('- single character mappings: %d' % len(singles))
    print('- complex mappings (1:n): %d' % len(multis))
    print('- remaining (should be zero): %d' % len(convmap.keys()))
# XXX: opportunities for diff encoding skip=3 ranges?
prev = None
for t in ranges:
# range: [start_i, start_o, count, skip]
if t[3] != 3:
continue
if prev is not None:
            print('- %d %d' % (t[0] - prev[0], t[1] - prev[1]))
else:
            print('- start: %d %d' % (t[0], t[1]))
prev = t
# Bit packed encoding.
be = dukutil.BitEncoder()
for curr_skip in xrange(1, 7): # 1...6
count = 0
for r in ranges:
start_i, start_o, r_count, skip = r[0], r[1], r[2], r[3]
if skip != curr_skip:
continue
count += 1
be.bits(count, 6)
print('- encode: skip=%d, count=%d' % (curr_skip, count))
for r in ranges:
start_i, start_o, r_count, skip = r[0], r[1], r[2], r[3]
if skip != curr_skip:
continue
be.bits(start_i, 16)
be.bits(start_o, 16)
be.bits(r_count, 7)
be.bits(0x3f, 6) # maximum count value = end of skips
count = len(singles)
be.bits(count, 7)
for t in singles:
cp_i, cp_o = t[0], t[1]
be.bits(cp_i, 16)
be.bits(cp_o, 16)
count = len(multis)
be.bits(count, 7)
for t in multis:
cp_i, str_o = t[0], t[1]
be.bits(cp_i, 16)
be.bits(len(str_o), 2)
for i in xrange(len(str_o)):
be.bits(ord(str_o[i]), 16)
return be.getBytes(), be.getNumBits()
def generate_regexp_canonicalize_tables(convmap):
"""Generate tables for case insensitive RegExp normalization."""
# Generate a direct codepoint lookup for canonicalizing BMP range.
def generate_canontab():
res = []
highest_nonid = -1
for cp in xrange(65536):
res_cp = cp # default to as is
if convmap.has_key(cp):
tmp = convmap[cp]
if len(tmp) == 1:
# If multiple codepoints from input, ignore.
res_cp = ord(tmp[0])
if cp >= 0x80 and res_cp < 0x80:
res_cp = cp # If non-ASCII mapped to ASCII, ignore.
if cp != res_cp:
highest_nonid = cp
res.append(res_cp)
# At the moment this is 65370, which means there's very little
# gain in assuming 1:1 mapping above a certain BMP codepoint
# (though we do assume 1:1 mapping for above BMP codepoints).
print('- highest non-identity mapping: %d' % highest_nonid)
return res
print('generate canontab')
canontab = generate_canontab()
# Figure out which BMP values are never the result of canonicalization.
# Such codepoints are "don't care" in the sense that they are never
# matched against at runtime: ranges are canonicalized at compile time,
# and codepoint being matched is also canonicalized at run time.
# (Currently unused.)
def generate_dontcare():
res = [ True ] * 65536
for cp in canontab:
res[cp] = False
res_count = 0
for x in res:
if x:
res_count += 1
print('- %d dontcare codepoints' % res_count)
return res
print('generate canon dontcare')
dontcare = generate_dontcare()
# Generate maximal continuous ranges for canonicalization. A continuous
# range is a sequence with N codepoints where IN+i canonicalizes to OUT+i
# for fixed IN, OUT, and i in 0...N-1. There are unfortunately >1000
# of these ranges, mostly because there are a lot of individual exceptions.
# (Currently unused.)
canon_ranges = []
for cp in xrange(65536):
canon_ranges.append([ cp, canontab[cp], 1 ]) # 1 codepoint ranges at first
def merge_compatible_nogap(rng1, rng2):
# Merge adjacent ranges if continuity allows.
if rng1[0] + rng1[2] == rng2[0] and \
rng1[1] + rng1[2] == rng2[1]:
return [ rng1[0], rng1[1], rng1[2] + rng2[2] ]
return None
def merge_check_nogap():
len_start = len(canon_ranges)
for i in xrange(len(canon_ranges) - 1):
j = i + 1
rng1 = canon_ranges[i]
rng2 = canon_ranges[j]
if rng1 is None or rng2 is None: continue
merged = merge_compatible_nogap(rng1, rng2)
if merged is not None:
canon_ranges[j] = None
canon_ranges[i] = merged
filtered = []
for x in canon_ranges:
if x is not None:
filtered.append(x)
len_end = len(filtered)
if len_end < len_start:
return filtered
return None
print('generate canon_ranges')
while True:
# Starting from individual ranges of 1 codepoint, merge adjacent
# ranges until no more ranges can be merged.
t = merge_check_nogap()
if t is None:
break
canon_ranges = t
print('- %d ranges' % len(canon_ranges))
#for rng in canon_ranges:
# print('canon_ranges:')
# print(repr(rng))
# Generate true/false ranges for BMP codepoints where:
# - A codepoint is flagged true if continuity is broken at that point, so
# an explicit codepoint canonicalization is needed at runtime.
# - A codepoint is flagged false if case conversion is continuous from the
# previous codepoint, i.e. out_curr = out_prev + 1.
#
# The result is a lot of small ranges due to a lot of small 'false' ranges.
# Reduce the range set by checking if adjacent 'true' ranges have at most
# false_limit 'false' entries between them. If so, force the 'false'
# entries to 'true' (safe but results in an unnecessary runtime codepoint
# lookup) and merge the three ranges into a larger 'true' range.
#
# (Currently unused.)
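    # Example: a TRUE, FALSE*4, TRUE sequence with false_limit >= 4 collapses
    # into a single TRUE range covering all six codepoints.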
def generate_needcheck_straight():
res = [ True ] * 65536
assert(canontab[0] == 0) # can start from in == out == 0
prev_in = -1
prev_out = -1
for i in xrange(65536):
# First create a straight true/false bitmap for BMP.
curr_in = i
curr_out = canontab[i]
if prev_in + 1 == curr_in and prev_out + 1 == curr_out:
res[i] = False
prev_in = curr_in
prev_out = curr_out
return res
def generate_needcheck_ranges(data):
# Generate maximal accurate ranges.
prev = None
count = 0
ranges = []
for i in data:
if prev is None or prev != i:
if prev is not None:
ranges.append([ prev, count ])
prev = i
count = 1
else:
count += 1
if prev is not None:
ranges.append([ prev, count ])
return ranges
def fillin_needcheck_ranges(data, false_limit):
# Fill in TRUE-FALSE*N-TRUE gaps into TRUE-TRUE*N-TRUE which is
# safe (leads to an unnecessary runtime check) but reduces
# range data size considerably.
res = []
for r in data:
res.append([ r[0], r[1] ])
while True:
found = False
for i in xrange(len(res) - 2):
r1 = res[i]
r2 = res[i + 1]
r3 = res[i + 2]
if r1[0] == True and r2[0] == False and r3[0] == True and \
r2[1] <= false_limit:
#print('fillin %d falses' % r2[1])
res.pop(i + 2)
res.pop(i + 1)
res[i] = [ True, r1[1] + r2[1] + r3[1] ]
found = True
break
if not found:
break
return res
print('generate needcheck straight')
needcheck = generate_needcheck_straight()
print('generate needcheck without false fillins')
needcheck_ranges1 = generate_needcheck_ranges(needcheck)
print('- %d ranges' % len(needcheck_ranges1))
#print(needcheck_ranges1)
print('generate needcheck with false fillins')
needcheck_ranges2 = fillin_needcheck_ranges(needcheck_ranges1, 11)
print('- %d ranges' % len(needcheck_ranges2))
#print(needcheck_ranges2)
# Generate a bitmap for BMP, divided into N-codepoint blocks, with each
# bit indicating: "entire codepoint block canonicalizes continuously, and
# the block is continuous with the previous and next block". A 'true'
# entry allows runtime code to just skip the block, advancing 'in' and
# 'out' by the block size, with no codepoint conversion. The block size
# should be large enough to produce a relatively small lookup table, but
# small enough to reduce codepoint conversions to a manageable number
# because the conversions are (currently) quite slow. This matters
# especially for case-insensitive RegExps; without any optimization,
# /[\u0000-\uffff]/i requires 65536 case conversions for runtime
# normalization.
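    # With block_shift = 5 (32-codepoint blocks) there are 2048 blocks, so the
    # resulting bitmap for the whole BMP fits in 256 bytes.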
block_shift = 5
block_size = 1 << block_shift
block_mask = block_size - 1
num_blocks = 65536 / block_size
def generate_block_bits(check_continuity):
res = [ True ] * num_blocks
for i in xrange(num_blocks):
base_in = i * block_size
base_out = canontab[base_in]
if check_continuity:
lower = -1 # [-1,block_size]
upper = block_size + 1
else:
lower = 0 # [0,block_size-1]
upper = block_size
for j in xrange(lower, upper):
cp = base_in + j
if cp >= 0x0000 and cp <= 0xffff and canontab[cp] != base_out + j:
res[i] = False
break
return res
def dump_block_bitmap(bits):
tmp = ''.join([ ({ True: 'x', False: '.' })[b] for b in bits])
tmp = re.sub(r'.{64}', lambda x: x.group(0) + '\n', tmp)
blocks_true = tmp.count('x')
blocks_false = tmp.count('.')
print('%d codepoint blocks are continuous, %d blocks are not' % (blocks_true, blocks_false))
sys.stdout.write(tmp)
#print(bits)
def dump_test_lookup(bits):
        sys.stdout.write('duk_uint8_t test[] = {')
for b in bits:
if b:
sys.stdout.write('1,')
else:
sys.stdout.write('0,')
sys.stdout.write('};\n')
def convert_to_bitmap(bits):
# C code looks up bits as:
# index = codepoint >> N
# bitnum = codepoint & mask
# bitmask = 1 << bitnum
# So block 0 is mask 0x01 of first byte, block 1 is mask 0x02 of
# first byte, etc.
res = []
curr = 0
mask = 0x01
for b in bits:
if b:
curr += mask
mask = mask * 2
if mask == 0x100:
res.append(curr)
curr = 0
mask = 0x01
assert(mask == 0x01) # no leftover
return res
print('generate canon block bitmap without continuity')
block_bits1 = generate_block_bits(False)
dump_block_bitmap(block_bits1)
dump_test_lookup(block_bits1)
print('generate canon block bitmap with continuity')
block_bits2 = generate_block_bits(True)
dump_block_bitmap(block_bits2)
dump_test_lookup(block_bits2)
print('generate final canon bitmap')
block_bitmap = convert_to_bitmap(block_bits2)
print('- %d bytes' % len(block_bitmap))
print('- ' + repr(block_bitmap))
canon_bitmap = {
'data': block_bitmap,
'block_size': block_size,
'block_shift': block_shift,
'block_mask': block_mask
}
# This is useful to figure out corner case test cases.
print('canon blocks which are different with and without continuity check')
for i in xrange(num_blocks):
if block_bits1[i] != block_bits2[i]:
print('- block %d ([%d,%d]) differs' % (i, i * block_size, i * block_size + block_size - 1))
return canontab, canon_bitmap
def clonedict(x):
"Shallow clone of input dict."
res = {}
for k in x.keys():
res[k] = x[k]
return res
def main():
parser = optparse.OptionParser()
parser.add_option('--command', dest='command', default='caseconv_bitpacked')
parser.add_option('--unicode-data', dest='unicode_data')
parser.add_option('--special-casing', dest='special_casing')
parser.add_option('--out-source', dest='out_source')
parser.add_option('--out-header', dest='out_header')
parser.add_option('--table-name-lc', dest='table_name_lc', default='caseconv_lc')
parser.add_option('--table-name-uc', dest='table_name_uc', default='caseconv_uc')
parser.add_option('--table-name-re-canon-lookup', dest='table_name_re_canon_lookup', default='caseconv_re_canon_lookup')
parser.add_option('--table-name-re-canon-bitmap', dest='table_name_re_canon_bitmap', default='caseconv_re_canon_bitmap')
(opts, args) = parser.parse_args()
unicode_data = UnicodeData(opts.unicode_data)
special_casing = SpecialCasing(opts.special_casing)
uc, lc, tc = get_base_conversion_maps(unicode_data)
update_special_casings(uc, lc, tc, special_casing)
if opts.command == 'caseconv_bitpacked':
# XXX: ASCII and non-BMP filtering could be an option but is now hardcoded
# ASCII is handled with 'fast path' so not needed here.
t = clonedict(uc)
remove_ascii_part(t)
uc_bytes, uc_nbits = generate_caseconv_tables(t)
t = clonedict(lc)
remove_ascii_part(t)
lc_bytes, lc_nbits = generate_caseconv_tables(t)
# Generate C source and header files.
genc = dukutil.GenerateC()
genc.emitHeader('extract_caseconv.py')
genc.emitArray(uc_bytes, opts.table_name_uc, size=len(uc_bytes), typename='duk_uint8_t', intvalues=True, const=True)
genc.emitArray(lc_bytes, opts.table_name_lc, size=len(lc_bytes), typename='duk_uint8_t', intvalues=True, const=True)
f = open(opts.out_source, 'wb')
f.write(genc.getString())
f.close()
genc = dukutil.GenerateC()
genc.emitHeader('extract_caseconv.py')
genc.emitLine('extern const duk_uint8_t %s[%d];' % (opts.table_name_uc, len(uc_bytes)))
genc.emitLine('extern const duk_uint8_t %s[%d];' % (opts.table_name_lc, len(lc_bytes)))
f = open(opts.out_header, 'wb')
f.write(genc.getString())
f.close()
elif opts.command == 're_canon_lookup':
# Direct canonicalization lookup for case insensitive regexps, includes ascii part.
t = clonedict(uc)
re_canon_lookup, re_canon_bitmap = generate_regexp_canonicalize_tables(t)
genc = dukutil.GenerateC()
genc.emitHeader('extract_caseconv.py')
genc.emitArray(re_canon_lookup, opts.table_name_re_canon_lookup, size=len(re_canon_lookup), typename='duk_uint16_t', intvalues=True, const=True)
f = open(opts.out_source, 'wb')
f.write(genc.getString())
f.close()
genc = dukutil.GenerateC()
genc.emitHeader('extract_caseconv.py')
genc.emitLine('extern const duk_uint16_t %s[%d];' % (opts.table_name_re_canon_lookup, len(re_canon_lookup)))
f = open(opts.out_header, 'wb')
f.write(genc.getString())
f.close()
elif opts.command == 're_canon_bitmap':
# N-codepoint block bitmap for skipping continuous codepoint blocks
# quickly.
t = clonedict(uc)
re_canon_lookup, re_canon_bitmap = generate_regexp_canonicalize_tables(t)
genc = dukutil.GenerateC()
genc.emitHeader('extract_caseconv.py')
genc.emitArray(re_canon_bitmap['data'], opts.table_name_re_canon_bitmap, size=len(re_canon_bitmap['data']), typename='duk_uint8_t', intvalues=True, const=True)
f = open(opts.out_source, 'wb')
f.write(genc.getString())
f.close()
genc = dukutil.GenerateC()
genc.emitHeader('extract_caseconv.py')
genc.emitDefine('DUK_CANON_BITMAP_BLKSIZE', re_canon_bitmap['block_size'])
genc.emitDefine('DUK_CANON_BITMAP_BLKSHIFT', re_canon_bitmap['block_shift'])
genc.emitDefine('DUK_CANON_BITMAP_BLKMASK', re_canon_bitmap['block_mask'])
genc.emitLine('extern const duk_uint8_t %s[%d];' % (opts.table_name_re_canon_bitmap, len(re_canon_bitmap['data'])))
f = open(opts.out_header, 'wb')
f.write(genc.getString())
f.close()
else:
raise Exception('invalid command: %r' % opts.command)
if __name__ == '__main__':
main()
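
# Illustrative invocation (not part of the original script). The option names come from
# main() above; the input/output file names are assumptions for the example only:
#
#   python extract_caseconv.py --command caseconv_bitpacked \
#       --unicode-data UnicodeData.txt --special-casing SpecialCasing.txt \
#       --out-source duk_caseconv.c --out-header duk_caseconv.h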
| mit | 6,939,381,313,722,262,000 | 34.148704 | 167 | 0.572582 | false |
bloem-project/bloem-server | files/serializers.py | 1 | 1692 | """Bloem's Files application model serializers.
This file contains the various serializers used by the Files application's API.
It defines how the data is presented for each model viewed through the API.
"""
from django_celery_results.models import TaskResult
from rest_framework import serializers
from files.models import File, InboxItem, Tag, Namespace, Person, Directory
class DirectorySerializer(serializers.ModelSerializer):
"""Serializer for the File model."""
class Meta:
model = Directory
fields = "__all__"
class FileSerializer(serializers.HyperlinkedModelSerializer):
"""Serializer for the File model."""
class Meta:
model = File
fields = ('id', 'hash', 'file_name', 'path', 'full_path', 'date_added', 'date_modified', 'tags')
class InboxItemSerializer(serializers.HyperlinkedModelSerializer):
"""Serializer for the InboxItem model."""
class Meta:
model = InboxItem
fields = "__all__"
class NamespaceSerializer(serializers.HyperlinkedModelSerializer):
"""Serializer for the InboxItem model."""
class Meta:
model = Namespace
fields = "__all__"
class TagSerializer(serializers.HyperlinkedModelSerializer):
"""Serializer for the InboxItem model."""
class Meta:
model = Tag
fields = "__all__"
class PersonSerializer(serializers.HyperlinkedModelSerializer):
"""Serializer for the InboxItem model."""
class Meta:
model = Person
fields = "__all__"
class TaskResultSerializer(serializers.ModelSerializer):
"""Serializer for the InboxItem model."""
class Meta:
model = TaskResult
fields = "__all__"
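
# Illustrative usage sketch (not part of the original module): serializing a queryset in a
# DRF view. The `request` variable and the Response import are assumptions for the example.
#
#     serializer = FileSerializer(File.objects.all(), many=True,
#                                 context={'request': request})
#     return Response(serializer.data)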
| gpl-3.0 | -5,787,603,825,081,167,000 | 25.030769 | 104 | 0.683215 | false |
strk/QGIS | python/plugins/processing/gui/AlgorithmExecutor.py | 1 | 15842 | # -*- coding: utf-8 -*-
"""
***************************************************************************
AlgorithmExecutor.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import sys
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (Qgis,
QgsFeatureSink,
QgsProcessingFeedback,
QgsProcessingUtils,
QgsMessageLog,
QgsProcessingException,
QgsProcessingFeatureSourceDefinition,
QgsProcessingFeatureSource,
QgsProcessingParameters,
QgsProject,
QgsFeatureRequest,
QgsFeature,
QgsExpression,
QgsWkbTypes,
QgsGeometry,
QgsVectorLayerUtils,
QgsVectorLayer)
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.tools import dataobjects
from qgis.utils import iface
def execute(alg, parameters, context=None, feedback=None):
"""Executes a given algorithm, showing its progress in the
progress object passed along.
Return true if everything went OK, false if the algorithm
could not be completed.
"""
if feedback is None:
feedback = QgsProcessingFeedback()
if context is None:
context = dataobjects.createContext(feedback)
try:
results, ok = alg.run(parameters, context, feedback)
return ok, results
except QgsProcessingException as e:
QgsMessageLog.logMessage(str(sys.exc_info()[0]), 'Processing', Qgis.Critical)
if feedback is not None:
feedback.reportError(e.msg)
return False, {}
def execute_in_place_run(alg, parameters, context=None, feedback=None, raise_exceptions=False):
"""Executes an algorithm modifying features in-place in the input layer.
:param alg: algorithm to run
:type alg: QgsProcessingAlgorithm
:param parameters: parameters of the algorithm
:type parameters: dict
:param context: context, defaults to None
:type context: QgsProcessingContext, optional
:param feedback: feedback, defaults to None
:type feedback: QgsProcessingFeedback, optional
:param raise_exceptions: useful for testing, if True exceptions are raised, normally exceptions will be forwarded to the feedback
    :type raise_exceptions: bool, defaults to False
:raises QgsProcessingException: raised when there is no active layer, or it cannot be made editable
    :return: a tuple (ok, results) where ok is True on success and results are the algorithm outputs
:rtype: tuple
"""
if feedback is None:
feedback = QgsProcessingFeedback()
if context is None:
context = dataobjects.createContext(feedback)
# Only feature based algs have sourceFlags
try:
if alg.sourceFlags() & QgsProcessingFeatureSource.FlagSkipGeometryValidityChecks:
context.setInvalidGeometryCheck(QgsFeatureRequest.GeometryNoCheck)
except AttributeError:
pass
active_layer = parameters['INPUT']
# Run some checks and prepare the layer for in-place execution by:
# - getting the active layer and checking that it is a vector
# - making the layer editable if it was not already
# - selecting all features if none was selected
# - checking in-place support for the active layer/alg/parameters
# If one of the check fails and raise_exceptions is True an exception
# is raised, else the execution is aborted and the error reported in
# the feedback
try:
if active_layer is None:
raise QgsProcessingException(tr("There is not active layer."))
if not isinstance(active_layer, QgsVectorLayer):
raise QgsProcessingException(tr("Active layer is not a vector layer."))
if not active_layer.isEditable():
if not active_layer.startEditing():
raise QgsProcessingException(tr("Active layer is not editable (and editing could not be turned on)."))
if not alg.supportInPlaceEdit(active_layer):
raise QgsProcessingException(tr("Selected algorithm and parameter configuration are not compatible with in-place modifications."))
except QgsProcessingException as e:
if raise_exceptions:
raise e
QgsMessageLog.logMessage(str(sys.exc_info()[0]), 'Processing', Qgis.Critical)
if feedback is not None:
feedback.reportError(getattr(e, 'msg', str(e)), fatalError=True)
return False, {}
if not active_layer.selectedFeatureIds():
active_layer.selectAll()
# Make sure we are working on selected features only
parameters['INPUT'] = QgsProcessingFeatureSourceDefinition(active_layer.id(), True)
parameters['OUTPUT'] = 'memory:'
req = QgsFeatureRequest(QgsExpression(r"$id < 0"))
req.setFlags(QgsFeatureRequest.NoGeometry)
req.setSubsetOfAttributes([])
# Start the execution
# If anything goes wrong and raise_exceptions is True an exception
# is raised, else the execution is aborted and the error reported in
# the feedback
try:
new_feature_ids = []
active_layer.beginEditCommand(alg.displayName())
# Checks whether the algorithm has a processFeature method
if hasattr(alg, 'processFeature'): # in-place feature editing
# Make a clone or it will crash the second time the dialog
# is opened and run
alg = alg.create()
if not alg.prepare(parameters, context, feedback):
raise QgsProcessingException(tr("Could not prepare selected algorithm."))
# Check again for compatibility after prepare
if not alg.supportInPlaceEdit(active_layer):
raise QgsProcessingException(tr("Selected algorithm and parameter configuration are not compatible with in-place modifications."))
# some algorithms have logic in outputFields/outputCrs/outputWkbType which they require to execute before
# they can start processing features
_ = alg.outputFields(active_layer.fields())
_ = alg.outputWkbType(active_layer.wkbType())
_ = alg.outputCrs(active_layer.crs())
field_idxs = range(len(active_layer.fields()))
iterator_req = QgsFeatureRequest(active_layer.selectedFeatureIds())
iterator_req.setInvalidGeometryCheck(context.invalidGeometryCheck())
feature_iterator = active_layer.getFeatures(iterator_req)
step = 100 / len(active_layer.selectedFeatureIds()) if active_layer.selectedFeatureIds() else 1
for current, f in enumerate(feature_iterator):
if feedback.isCanceled():
break
# need a deep copy, because python processFeature implementations may return
# a shallow copy from processFeature
input_feature = QgsFeature(f)
new_features = alg.processFeature(input_feature, context, feedback)
new_features = QgsVectorLayerUtils.makeFeaturesCompatible(new_features, active_layer)
if len(new_features) == 0:
active_layer.deleteFeature(f.id())
elif len(new_features) == 1:
new_f = new_features[0]
if not f.geometry().equals(new_f.geometry()):
active_layer.changeGeometry(f.id(), new_f.geometry())
if f.attributes() != new_f.attributes():
active_layer.changeAttributeValues(f.id(), dict(zip(field_idxs, new_f.attributes())), dict(zip(field_idxs, f.attributes())))
new_feature_ids.append(f.id())
else:
active_layer.deleteFeature(f.id())
# Get the new ids
old_ids = set([f.id() for f in active_layer.getFeatures(req)])
# If multiple new features were created, we need to pass
# them to createFeatures to manage constraints correctly
features_data = []
for f in new_features:
features_data.append(QgsVectorLayerUtils.QgsFeatureData(f.geometry(), dict(enumerate(f.attributes()))))
new_features = QgsVectorLayerUtils.createFeatures(active_layer, features_data, context.expressionContext())
if not active_layer.addFeatures(new_features):
raise QgsProcessingException(tr("Error adding processed features back into the layer."))
new_ids = set([f.id() for f in active_layer.getFeatures(req)])
new_feature_ids += list(new_ids - old_ids)
feedback.setProgress(int((current + 1) * step))
results, ok = {}, True
else: # Traditional 'run' with delete and add features cycle
# There is no way to know if some features have been skipped
# due to invalid geometries
if context.invalidGeometryCheck() == QgsFeatureRequest.GeometrySkipInvalid:
selected_ids = active_layer.selectedFeatureIds()
else:
selected_ids = []
results, ok = alg.run(parameters, context, feedback)
if ok:
result_layer = QgsProcessingUtils.mapLayerFromString(results['OUTPUT'], context)
# TODO: check if features have changed before delete/add cycle
new_features = []
# Check if there are any skipped features
if context.invalidGeometryCheck() == QgsFeatureRequest.GeometrySkipInvalid:
missing_ids = list(set(selected_ids) - set(result_layer.allFeatureIds()))
if missing_ids:
for f in active_layer.getFeatures(QgsFeatureRequest(missing_ids)):
if not f.geometry().isGeosValid():
new_features.append(f)
active_layer.deleteFeatures(active_layer.selectedFeatureIds())
for f in result_layer.getFeatures():
new_features.extend(QgsVectorLayerUtils.
makeFeaturesCompatible([f], active_layer))
# Get the new ids
old_ids = set([f.id() for f in active_layer.getFeatures(req)])
if not active_layer.addFeatures(new_features):
raise QgsProcessingException(tr("Error adding processed features back into the layer."))
new_ids = set([f.id() for f in active_layer.getFeatures(req)])
new_feature_ids += list(new_ids - old_ids)
active_layer.endEditCommand()
if ok and new_feature_ids:
active_layer.selectByIds(new_feature_ids)
elif not ok:
active_layer.rollBack()
return ok, results
except QgsProcessingException as e:
active_layer.endEditCommand()
active_layer.rollBack()
if raise_exceptions:
raise e
QgsMessageLog.logMessage(str(sys.exc_info()[0]), 'Processing', Qgis.Critical)
if feedback is not None:
feedback.reportError(getattr(e, 'msg', str(e)), fatalError=True)
return False, {}
def execute_in_place(alg, parameters, context=None, feedback=None):
"""Executes an algorithm modifying features in-place, if the INPUT
parameter is not defined, the current active layer will be used as
INPUT.
:param alg: algorithm to run
:type alg: QgsProcessingAlgorithm
:param parameters: parameters of the algorithm
:type parameters: dict
    :param context: context, defaults to None
    :type context: QgsProcessingContext, optional
    :param feedback: feedback, defaults to None
    :type feedback: QgsProcessingFeedback, optional
:raises QgsProcessingException: raised when the layer is not editable or the layer cannot be found in the current project
    :return: a tuple (ok, results) where ok is True on success and results are the algorithm outputs
:rtype: tuple
"""
if feedback is None:
feedback = QgsProcessingFeedback()
if context is None:
context = dataobjects.createContext(feedback)
if not 'INPUT' in parameters or not parameters['INPUT']:
parameters['INPUT'] = iface.activeLayer()
ok, results = execute_in_place_run(alg, parameters, context=context, feedback=feedback)
if ok:
if isinstance(parameters['INPUT'], QgsProcessingFeatureSourceDefinition):
layer = alg.parameterAsVectorLayer({'INPUT': parameters['INPUT'].source}, 'INPUT', context)
elif isinstance(parameters['INPUT'], QgsVectorLayer):
layer = parameters['INPUT']
if layer:
layer.triggerRepaint()
return ok, results
def executeIterating(alg, parameters, paramToIter, context, feedback):
# Generate all single-feature layers
parameter_definition = alg.parameterDefinition(paramToIter)
if not parameter_definition:
return False
iter_source = QgsProcessingParameters.parameterAsSource(parameter_definition, parameters, context)
sink_list = []
if iter_source.featureCount() == 0:
return False
step = 100.0 / iter_source.featureCount()
for current, feat in enumerate(iter_source.getFeatures()):
if feedback.isCanceled():
return False
sink, sink_id = QgsProcessingUtils.createFeatureSink('memory:', context, iter_source.fields(), iter_source.wkbType(), iter_source.sourceCrs())
sink_list.append(sink_id)
sink.addFeature(feat, QgsFeatureSink.FastInsert)
del sink
feedback.setProgress(int((current + 1) * step))
# store output values to use them later as basenames for all outputs
outputs = {}
for out in alg.destinationParameterDefinitions():
if out.name() in parameters:
outputs[out.name()] = parameters[out.name()]
# now run all the algorithms
for i, f in enumerate(sink_list):
if feedback.isCanceled():
return False
parameters[paramToIter] = f
for out in alg.destinationParameterDefinitions():
if out.name() not in outputs:
continue
o = outputs[out.name()]
parameters[out.name()] = QgsProcessingUtils.generateIteratingDestination(o, i, context)
feedback.setProgressText(QCoreApplication.translate('AlgorithmExecutor', 'Executing iteration {0}/{1}…').format(i + 1, len(sink_list)))
feedback.setProgress(int((i + 1) * 100 / len(sink_list)))
ret, results = execute(alg, parameters, context, feedback)
if not ret:
return False
handleAlgorithmResults(alg, context, feedback, False)
return True
def tr(string, context=''):
if context == '':
context = 'AlgorithmExecutor'
return QCoreApplication.translate(context, string)
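
# Illustrative sketch (not part of the original module) of calling execute() directly.
# The algorithm id and parameter values below are assumptions for the example only:
#
#     alg = QgsApplication.processingRegistry().algorithmById('native:buffer')
#     ok, results = execute(alg, {'INPUT': layer, 'DISTANCE': 10.0, 'OUTPUT': 'memory:'},
#                           context=None, feedback=None)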
| gpl-2.0 | 6,528,731,308,621,374,000 | 42.878116 | 150 | 0.61654 | false |
doraemonext/wechat-platform | wechat_platform/system/rule_match/models.py | 1 | 2385 | # -*- coding: utf-8 -*-
import logging
from django.db import models
from system.official_account.models import OfficialAccount
from system.rule.models import Rule
logger_rule_match = logging.getLogger(__name__)
class RuleMatchManager(models.Manager):
"""
    WeChat rule reply table Manager
"""
def add(self, rule, plugin_iden, reply_id=0, order=0, status=True):
"""
        Add a WeChat rule reply
"""
rule_match = super(RuleMatchManager, self).create(
official_account=rule.official_account,
rule=rule,
plugin_iden=plugin_iden,
reply_id=reply_id,
order=order,
status=status
)
logger_rule_match.info('New rule_match created [Detail] %s' % rule_match.__dict__)
return rule_match
def get(self, rule):
"""
        Return the corresponding QuerySet for the given rule.
        The returned set is already ordered by priority, with disabled matches excluded.
"""
return super(RuleMatchManager, self).get_queryset().filter(
official_account=rule.official_account
).filter(
rule=rule
).filter(
status=True
).order_by(
'-order', 'id'
)
def get_news(self, news_id):
"""
        Return all news (image/text) matches in the table for the given news_id.
        :param news_id: ID of the news (image/text) reply
"""
return super(RuleMatchManager, self).get_queryset().filter(
plugin_iden='news'
).filter(
reply_id=news_id
)
class RuleMatch(models.Model):
"""
    WeChat rule match table
"""
official_account = models.ForeignKey(OfficialAccount, verbose_name=u'所属公众号')
rule = models.ForeignKey(Rule, verbose_name=u'所属规则')
plugin_iden = models.CharField(u'响应插件标识符', max_length=50)
reply_id = models.PositiveIntegerField(u'响应ID号', default=0)
order = models.PositiveIntegerField(u'优先级', default=0)
status = models.BooleanField(u'是否启用', default=True)
objects = models.Manager()
manager = RuleMatchManager()
class Meta:
verbose_name = u'微信规则匹配表'
verbose_name_plural = u'微信规则匹配表'
db_table = 'wechat_rule_match'
def __unicode__(self):
return self.plugin_iden
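
# Illustrative example (not part of the original app): querying matches via the manager.
# `rule` is assumed to be an existing Rule instance.
#
#     matches = RuleMatch.manager.get(rule)         # enabled matches, ordered by priority
#     news_matches = RuleMatch.manager.get_news(news_id=3)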
| bsd-2-clause | 1,502,957,737,470,831,000 | 25.481481 | 90 | 0.592075 | false |
unrealcv/unrealcv | docs/build_doc.py | 1 | 2389 | import argparse, subprocess, platform, os, shutil, webbrowser
def clean():
files = []
folders = ['_build', 'doxygen/html', 'doxygen/latex', 'doxygen/xml']
for f in files:
if os.path.isfile(f):
os.remove(f)
for f in folders:
if os.path.isdir(f):
shutil.rmtree(f)
def lfs_checkout():
import git_lfs
doc_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.dirname(doc_dir)
git_lfs.fetch(project_dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--build_doxygen',
action='store_true', default=False
)
parser.add_argument('--rtd',
action='store_true', default=False,
help='Simulate running on RTD server'
)
parser.add_argument('--clean',
action='store_true', default=False,
help='Remove build artifacts'
)
parser.add_argument('--rebuild',
action='store_true', default=False,
help='Rebuild all the files to see all the warnings. By default only diffs are built to save time.'
)
    # building only the diffs takes less than a second
    # a full rebuild takes about 5 minutes
args = parser.parse_args()
is_build_doxygen = args.build_doxygen
is_on_rtd = args.rtd
is_clean = args.clean
rebuild = args.rebuild
if is_clean:
clean()
return
if is_build_doxygen:
try:
subprocess.call(['doxygen', 'Doxyfile'])
except Exception as e:
print('Failed to run doxygen')
print(e)
env = dict(os.environ)
if is_on_rtd:
lfs_checkout() # Run this here so that it is easier to debug
env['READTHEDOCS'] = 'True'
doc_folder = os.path.dirname(os.path.realpath(__file__))
output_folder = os.path.join(doc_folder, '_build/html')
cmd = [
'sphinx-build', '-n',
'-b', 'html', # build format
doc_folder, output_folder, # input, output folder
'-j', '16', # build in parallel
]
if rebuild:
clean()
# cmd.append('-a')
print(cmd)
subprocess.call(cmd, env = env)
# subprocess.call(cmd, env = os.environ)
index_file = os.path.join(output_folder, 'index.html')
print('Open compiled html in the browser')
webbrowser.open_new('file://' + os.path.realpath(index_file))
if __name__ == '__main__':
main()
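
# Example invocations (illustrative only; the flags are defined in main() above):
#   python build_doc.py                   # incremental HTML build
#   python build_doc.py --rebuild         # clean and rebuild everything
#   python build_doc.py --build_doxygen   # run doxygen before building the docs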
| mit | 1,137,941,226,037,286,000 | 26.45977 | 107 | 0.590205 | false |
roidelapluie/Lexpage | app/blog/migrations/0001_initial.py | 1 | 2391 | from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name='Titre')),
('slug', models.SlugField(max_length=120)),
('tags', models.CharField(help_text='\xc9tiquettes associ\xe9es, s\xe9par\xe9es par un espace.', max_length=100, verbose_name='\xc9tiquettes', blank=True)),
('abstract', models.TextField(help_text='Mis en page avec Markdown.', verbose_name='Chapeau')),
('text', models.TextField(help_text='Mis en page avec Markdown.', verbose_name='Contenu', blank=True)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date de cr\xe9ation')),
('date_approved', models.DateTimeField(null=True, verbose_name='Date de validation', blank=True)),
('date_published', models.DateTimeField(null=True, verbose_name='Date de publication', blank=True)),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Date de derni\xe8re modification', null=True)),
('priority', models.SmallIntegerField(default=5, verbose_name='Priorit\xe9', choices=[(1, 'Urgente'), (2, 'Haute'), (5, 'Normale'), (8, 'Faible'), (14, 'Tr\xe8s faible')])),
('status', models.SmallIntegerField(default=1, verbose_name='\xc9tat', choices=[(1, 'Brouillon'), (2, 'Propos\xe9'), (3, 'Valid\xe9'), (4, 'Publi\xe9'), (5, 'Masqu\xe9')])),
('approved_by', models.ForeignKey(related_name='+', verbose_name='Validateur', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
('author', models.ForeignKey(verbose_name='Auteur', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date_published'],
'get_latest_by': 'date_published',
'verbose_name': 'Billet',
'permissions': (('can_approve', 'Peut valider'),),
},
bases=(models.Model,),
),
]
| gpl-3.0 | 4,747,349,197,327,999,000 | 61.921053 | 189 | 0.596821 | false |
p2irc/deepplantphenomics | deepplantphenomics/classification_model.py | 1 | 18589 | from . import layers, loaders, definitions, DPPModel
import numpy as np
import tensorflow.compat.v1 as tf
import os
import datetime
import time
import warnings
import copy
from tqdm import tqdm
class ClassificationModel(DPPModel):
_supported_loss_fns = ['softmax cross entropy']
_supported_augmentations = [definitions.AugmentationType.FLIP_HOR,
definitions.AugmentationType.FLIP_VER,
definitions.AugmentationType.CROP,
definitions.AugmentationType.CONTRAST_BRIGHT,
definitions.AugmentationType.ROTATE]
def __init__(self, debug=False, load_from_saved=False, save_checkpoints=True, initialize=True, tensorboard_dir=None,
report_rate=100, save_dir=None):
super().__init__(debug, load_from_saved, save_checkpoints, initialize, tensorboard_dir, report_rate, save_dir)
self._loss_fn = 'softmax cross entropy'
# State variables specific to classification for constructing the graph and passing to Tensorboard
self.__class_predictions = None
self.__val_class_predictions = None
def _graph_tensorboard_summary(self, l2_cost, gradients, variables, global_grad_norm):
super()._graph_tensorboard_common_summary(l2_cost, gradients, variables, global_grad_norm)
# Summaries specific to classification problems
tf.summary.scalar('train/accuracy', self._graph_ops['accuracy'], collections=['custom_summaries'])
tf.summary.histogram('train/class_predictions', self.__class_predictions, collections=['custom_summaries'])
if self._validation:
tf.summary.scalar('validation/accuracy', self._graph_ops['val_accuracy'],
collections=['custom_summaries'])
tf.summary.histogram('validation/class_predictions', self.__val_class_predictions,
collections=['custom_summaries'])
self._graph_ops['merged'] = tf.summary.merge_all(key='custom_summaries')
def _assemble_graph(self):
with self._graph.as_default():
self._log('Assembling graph...')
self._log('Graph: Parsing dataset...')
with tf.device("/device:cpu:0"): # Only do preprocessing on the CPU to limit data transfer between devices
# Generate training, testing, and validation datasets
self._graph_parse_data()
# Batch the datasets and create iterators for them
train_iter = self._batch_and_iterate(self._train_dataset, shuffle=True)
if self._testing:
test_iter = self._batch_and_iterate(self._test_dataset)
if self._validation:
val_iter = self._batch_and_iterate(self._val_dataset)
if self._has_moderation:
train_mod_iter = self._batch_and_iterate(self._train_moderation_features)
if self._testing:
test_mod_iter = self._batch_and_iterate(self._test_moderation_features)
if self._validation:
val_mod_iter = self._batch_and_iterate(self._val_moderation_features)
# # If we are using patching, we extract a random patch from the image here
# if self._with_patching:
# x, offsets = self._graph_extract_patch(x)
# Create an optimizer object for all of the devices
optimizer = self._graph_make_optimizer()
# Set up the graph layers
self._log('Graph: Creating layer parameters...')
self._add_layers_to_graph()
# Do the forward pass and training output calcs on possibly multiple GPUs
device_costs = []
device_accuracies = []
device_gradients = []
device_variables = []
for n, d in enumerate(self._get_device_list()): # Build a graph on either the CPU or all of the GPUs
with tf.device(d), tf.name_scope('tower_' + str(n)):
x, y = train_iter.get_next()
# Run the network operations
if self._has_moderation:
mod_w = train_mod_iter.get_next()
xx = self.forward_pass(x, deterministic=False, moderation_features=mod_w)
else:
xx = self.forward_pass(x, deterministic=False)
# Define regularization cost
self._log('Graph: Calculating loss and gradients...')
l2_cost = self._graph_layer_loss()
# Define the cost function, then get the cost for this device's sub-batch and any parts of the cost
# needed to get the overall batch's cost later
pred_loss = self._graph_problem_loss(xx, y)
gpu_cost = tf.reduce_mean(tf.concat([pred_loss], axis=0)) + l2_cost
cost_sum = tf.reduce_sum(tf.concat([pred_loss], axis=0))
device_costs.append(cost_sum)
# For classification, we need the training accuracy as well so we can report it in Tensorboard
self.__class_predictions, correct_predictions = self._graph_compare_predictions(xx, y)
accuracy_sum = tf.reduce_sum(tf.cast(correct_predictions, tf.float32))
device_accuracies.append(accuracy_sum)
# Set the optimizer and get the gradients from it
gradients, variables, global_grad_norm = self._graph_get_gradients(gpu_cost, optimizer)
device_gradients.append(gradients)
device_variables.append(variables)
# Average the gradients from each GPU and apply them
average_gradients = self._graph_average_gradients(device_gradients)
opt_variables = device_variables[0]
self._graph_ops['optimizer'] = self._graph_apply_gradients(average_gradients, opt_variables, optimizer)
# Average the costs and accuracies from each GPU
self._graph_ops['cost'] = tf.reduce_sum(device_costs) / self._batch_size + l2_cost
self._graph_ops['accuracy'] = tf.reduce_sum(device_accuracies) / self._batch_size
# # If using patching, we need to properly pull similar patches from the test and validation images
# if self._with_patching:
# if self._testing:
# x_test, _ = self._graph_extract_patch(x_test, offsets)
# if self._validation:
# x_val, _ = self._graph_extract_patch(x_val, offsets)
if self._testing:
x_test, self._graph_ops['y_test'] = test_iter.get_next()
if self._has_moderation:
mod_w_test = test_mod_iter.get_next()
self._graph_ops['x_test_predicted'] = self.forward_pass(x_test, deterministic=True,
moderation_features=mod_w_test)
else:
self._graph_ops['x_test_predicted'] = self.forward_pass(x_test, deterministic=True)
_, self._graph_ops['test_losses'] = self._graph_compare_predictions(self._graph_ops['x_test_predicted'],
self._graph_ops['y_test'])
self._graph_ops['test_accuracy'] = tf.reduce_mean(tf.cast(self._graph_ops['test_losses'], tf.float32))
if self._validation:
x_val, self._graph_ops['y_val'] = val_iter.get_next()
if self._has_moderation:
mod_w_val = val_mod_iter.get_next()
self._graph_ops['x_val_predicted'] = self.forward_pass(x_val, deterministic=True,
moderation_features=mod_w_val)
else:
self._graph_ops['x_val_predicted'] = self.forward_pass(x_val, deterministic=True)
_, self._graph_ops['val_losses'] = self._graph_compare_predictions(self._graph_ops['x_val_predicted'],
self._graph_ops['y_val'])
self._graph_ops['val_accuracy'] = tf.reduce_mean(tf.cast(self._graph_ops['val_losses'], tf.float32))
# Epoch summaries for Tensorboard
if self._tb_dir is not None:
self._graph_tensorboard_summary(l2_cost, average_gradients, opt_variables, global_grad_norm)
def _graph_problem_loss(self, pred, lab):
if self._loss_fn == 'softmax cross entropy':
lab_idx = tf.argmax(lab, axis=1)
return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=lab_idx)
raise RuntimeError("Could not calculate problem loss for a loss function of " + self._loss_fn)
def _graph_compare_predictions(self, pred, lab):
"""
Compares the prediction and label classification for each item in a batch, returning
:param pred: Model class predictions for the batch; no softmax should be applied to it yet
:param lab: Labels for the correct class, with the same shape as pred
:return: 2 Tensors: one with the simplified class predictions (i.e. as a single number), and one with integer
flags (i.e. 1's and 0's) for whether predictions are correct
"""
pred_idx = tf.argmax(tf.nn.softmax(pred), axis=1)
lab_idx = tf.argmax(lab, axis=1)
is_correct = tf.equal(pred_idx, lab_idx)
return pred_idx, is_correct
def _training_batch_results(self, batch_num, start_time, tqdm_range, train_writer=None):
elapsed = time.time() - start_time
if train_writer is not None:
summary = self._session.run(self._graph_ops['merged'])
train_writer.add_summary(summary, batch_num)
if self._validation:
loss, epoch_accuracy, epoch_val_accuracy = self._session.run([self._graph_ops['cost'],
self._graph_ops['accuracy'],
self._graph_ops['val_accuracy']])
samples_per_sec = self._batch_size / elapsed
desc_str = "{}: Results for batch {} (epoch {:.1f}) " + \
"- Loss: {:.5f}, Training Accuracy: {:.4f}, Validation Accuracy: {:.4f}, samples/sec: {:.2f}"
tqdm_range.set_description(
desc_str.format(datetime.datetime.now().strftime("%I:%M%p"),
batch_num,
batch_num / (self._total_training_samples / self._batch_size),
loss,
epoch_accuracy,
epoch_val_accuracy,
samples_per_sec))
else:
loss, epoch_accuracy = self._session.run([self._graph_ops['cost'],
self._graph_ops['accuracy']])
samples_per_sec = self._batch_size / elapsed
desc_str = "{}: Results for batch {} (epoch {:.1f}) " + \
"- Loss: {:.5f}, Training Accuracy: {:.4f}, samples/sec: {:.2f}"
tqdm_range.set_description(
desc_str.format(datetime.datetime.now().strftime("%I:%M%p"),
batch_num,
batch_num / (self._total_training_samples / self._batch_size),
loss,
epoch_accuracy,
samples_per_sec))
def compute_full_test_accuracy(self):
        self._log('Computing total test accuracy...')
with self._graph.as_default():
num_batches = int(np.ceil(self._total_testing_samples / self._batch_size))
if num_batches == 0:
warnings.warn('Less than a batch of testing data')
exit()
# Initialize storage for the retrieved test variables
loss_sum = 0.0
# Main test loop
for _ in tqdm(range(num_batches)):
batch_mean = self._session.run([self._graph_ops['test_losses']])
loss_sum = loss_sum + np.mean(batch_mean)
# For classification problems (assumed to be multi-class), we want accuracy and confusion matrix (not
# implemented)
mean = (loss_sum / num_batches)
self._log('Average test accuracy: {:.5f}'.format(mean))
return 1.0 - mean.astype(np.float32)
def forward_pass_with_file_inputs(self, images):
with self._graph.as_default():
num_batches = len(images) // self._batch_size
if len(images) % self._batch_size != 0:
num_batches += 1
self._parse_images(images)
im_data = self._all_images.batch(self._batch_size).prefetch(1)
x_test = im_data.make_one_shot_iterator().get_next()
if self._load_from_saved:
self.load_state()
# Run model on them
x_pred = self.forward_pass(x_test, deterministic=True)
total_outputs = []
for i in range(int(num_batches)):
xx = self._session.run(x_pred)
for img in np.array_split(xx, xx.shape[0]):
total_outputs.append(img)
total_outputs = np.concatenate(total_outputs, axis=0)
return total_outputs
def forward_pass_with_interpreted_outputs(self, x):
# Perform forward pass of the network to get raw outputs and apply a softmax
xx = self.forward_pass_with_file_inputs(x)
interpreted_outputs = np.exp(xx) / np.sum(np.exp(xx), axis=1, keepdims=True)
return interpreted_outputs
def add_output_layer(self, regularization_coefficient=None, output_size=None):
if len(self._layers) < 1:
raise RuntimeError("An output layer cannot be the first layer added to the model. " +
"Add an input layer with DPPModel.add_input_layer() first.")
if regularization_coefficient is not None:
if not isinstance(regularization_coefficient, float):
raise TypeError("regularization_coefficient must be a float or None")
if regularization_coefficient < 0:
raise ValueError("regularization_coefficient must be non-negative")
if output_size is not None:
if not isinstance(output_size, int):
raise TypeError("output_size must be an int or None")
if output_size <= 0:
raise ValueError("output_size must be positive")
self._log('Adding output layer...')
reshape = self._last_layer_outputs_volume()
if regularization_coefficient is None and self._reg_coeff is not None:
regularization_coefficient = self._reg_coeff
if regularization_coefficient is None and self._reg_coeff is None:
regularization_coefficient = 0.0
if output_size is None:
num_out = self._total_classes
else:
num_out = output_size
with self._graph.as_default():
layer = layers.fullyConnectedLayer('output', copy.deepcopy(self._last_layer().output_size), num_out,
reshape, None, self._weight_initializer, regularization_coefficient)
self._log('Inputs: {0} Outputs: {1}'.format(layer.input_size, layer.output_size))
self._layers.append(layer)
def load_dataset_from_directory_with_auto_labels(self, dirname):
"""Loads the png images in the given directory, using subdirectories to separate classes."""
# Load all file names and labels into arrays
        subdirs = list(filter(lambda item: os.path.isdir(item) and os.path.basename(item) != '.DS_Store',
                              [os.path.join(dirname, f) for f in os.listdir(dirname)]))
num_classes = len(subdirs)
image_files = []
labels = np.array([])
for sd in subdirs:
image_paths = [os.path.join(sd, name) for name in os.listdir(sd) if
os.path.isfile(os.path.join(sd, name)) & name.endswith('.png')]
image_files = image_files + image_paths
# for one-hot labels
current_labels = np.zeros((num_classes, len(image_paths)))
current_labels[self._total_classes, :] = 1
labels = np.hstack([labels, current_labels]) if labels.size else current_labels
self._total_classes += 1
labels = tf.transpose(labels)
self._total_raw_samples = len(image_files)
self._log('Total raw examples is %d' % self._total_raw_samples)
self._log('Total classes is %d' % self._total_classes)
self._log('Parsing dataset...')
self._raw_image_files = image_files
self._raw_labels = labels
def load_ippn_dataset_from_directory(self, dirname, column='strain'):
"""Loads the RGB images and species labels from the International Plant Phenotyping Network dataset."""
labels = []
ids = []
if column == 'treatment':
labels, ids = loaders.read_csv_labels_and_ids(os.path.join(dirname, 'Metadata.csv'), 2, 0)
elif column == 'strain':
labels, ids = loaders.read_csv_labels_and_ids(os.path.join(dirname, 'Metadata.csv'), 1, 0)
elif column == 'DAG':
labels, ids = loaders.read_csv_labels_and_ids(os.path.join(dirname, 'Metadata.csv'), 3, 0)
else:
warnings.warn('Unknown column in IPPN dataset')
exit()
image_files = [os.path.join(dirname, im_id + '_rgb.png') for im_id in ids]
self._total_raw_samples = len(image_files)
self._total_classes = len(set(labels))
# transform into numerical one-hot labels
with self._graph.as_default():
labels = loaders.string_labels_to_sequential(labels)
labels = tf.one_hot(labels, self._total_classes)
self._log('Total classes is %d' % self._total_classes)
self._log('Total raw examples is %d' % self._total_raw_samples)
self._raw_image_files = image_files
self._raw_labels = labels
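
# Illustrative usage sketch (not part of the library); the dataset path is an assumption,
# and only methods defined in this class (plus the constructor) are used:
#
#     model = ClassificationModel(debug=True)
#     model.load_ippn_dataset_from_directory('./IPPN_dataset', column='strain')
#     # ... add input/hidden layers via the base class, then:
#     model.add_output_layer()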
| gpl-2.0 | -215,691,109,758,329,060 | 48.43883 | 120 | 0.56329 | false |
refugeehackathon/interpreteer-backend | bill_board/models.py | 1 | 3273 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models
# Create your models here.
from user_management.models import Language, Location
DIRECTION_CHOICES = (
(0, "required to known"),
(1, "known to required"),
(2, "both"),
)
TYPE_CHOICES = (
(0, "visit authorities"),
(1, "letter"),
)
class Request(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="requests")
required_language = models.ForeignKey(Language, related_name="required_by")
known_languages = models.ManyToManyField(Language, related_name="known_for_request")
direction = models.PositiveSmallIntegerField(choices=DIRECTION_CHOICES)
kind = models.IntegerField(choices=TYPE_CHOICES)
title = models.CharField(max_length=255)
description = models.TextField(blank=True)
location = models.ForeignKey(Location, related_name="requests")
start_time = models.DateTimeField(blank=True, null=True)
end_time = models.DateTimeField(blank=True, null=True)
requires_presence = models.BooleanField(default=False)
def __str__(self):
return "<Request: %s, %s, %s, %s>" % (self.user.username, self.start_time, self.end_time, self.kind)
def matching_offers(self):
offers = Offer.objects.filter(kind=self.kind)
if self.start_time is not None and self.end_time is not None:
offers = offers.filter(
start_time__lte=self.start_time,
end_time__gte=self.end_time)
if self.direction == 0:
offers = offers.filter(
user__translation_skills__source_language=self.required_language
).filter(
user__translation_skills__destination_language_id__in=self.known_languages.values('id')
)
elif self.direction == 1:
offers = offers.filter(
user__translation_skills__source_language__in=self.known_languages.values('id')
).filter(
user__translation_skills__destination_language_id=self.required_language
)
elif self.direction == 2:
offers_1 = offers.filter(
user__translation_skills__source_language=self.required_language
).filter(
user__translation_skills__destination_language_id__in=self.known_languages.values('id')
)
offers_2 = offers.filter(
user__translation_skills__source_language__in=self.known_languages.values('id')
).filter(
user__translation_skills__destination_language_id=self.required_language
)
offers = offers_1 & offers_2
return offers
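
# Illustrative example (not part of the original app), assuming an existing Request row:
#     offers = Request.objects.first().matching_offers()
#     # -> Offer queryset filtered by kind, time window and the users' translation skills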
class Offer(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name="offers")
location = models.ForeignKey(Location, related_name="offers")
kind = models.IntegerField(choices=TYPE_CHOICES)
title = models.CharField(max_length=255)
description = models.TextField(blank=True)
start_time = models.DateTimeField(blank=True, null=True)
end_time = models.DateTimeField(blank=True, null=True)
def __str__(self):
return "<Offer: %s, %s, %s, %s>" % (self.user.username, self.start_time, self.end_time, self.kind)
| gpl-2.0 | 837,201,081,823,674,600 | 38.914634 | 108 | 0.643141 | false |
Phoenyx/TruemaxScriptPackage | Truemax/moduleRig.py | 1 | 2707 | from Truemax.moduleScene import get_author_initials
__author__ = 'sofiaelm'
import manager
import maya.cmds as cmds
from pymel.all import mel
if get_author_initials() == 'mj':
bg_colour = [0.9, 0.4, 1]
else:
bg_colour = [0.4, 0.4, 0.4]
class ModuleRig(manager.Module):
def create_ui(self):
tab = str(cmds.columnLayout())
cmds.separator(style="none")
cmds.frameLayout(collapsable=True, label="Common")
cmds.columnLayout()
cmds.button(command=lambda *args: mel.abAutoRig(), label="AB Auto Rig", backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.rbSetLocalPivot(), label="Set Geo Pivot To Sel",
backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.joint_at_pivot(), label="Joint at Pivot", backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.JointTool(), label="Joint Tool", backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.rbZeroTransformer("_zero"), label="Zero Out Joint",
backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.IKHandleTool(), label="IK Handle Tool", backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.scMoveJntsModeOnOff(1), label="Move Joints On", backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.scMoveJntsModeOnOff(0), label="Move Joints Off",
backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.rb_ShapesWindow(), label="Controller Shapes", backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.wp_shapeParent(), label="Parent Shape", backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.ArtPaintSkinWeightsTool(), label="Maya Paint Skin Weights",
backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.paintSkinWeights(), label="Vertex Paint Skin Weights",
backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.cometSaveWeights(), label="-Comet- Save Skin Weights",
backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.objectColorPalette(), label="Wireframe Colour", backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.lockNonKeyable_all(), label="Lock and make Non-keyable (Selected)",
backgroundColor=bg_colour)
cmds.button(command=lambda *args: mel.NodeEditorWindow(), label="Node Editor", backgroundColor=bg_colour)
cmds.setParent('..')
cmds.setParent('..')
cmds.setParent('..')
return tab, "Rig"
def initModule(manager):
return ModuleRig(manager) | gpl-2.0 | 5,974,088,988,912,934,000 | 52.098039 | 120 | 0.671592 | false |
qnu/paramark | modules/verbose.py | 1 | 1471 | #############################################################################
# ParaMark: A Parallel/Distributed File Systems Benchmark
# Copyright (C) 2009,2010 Nan Dun <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
#
# modules/verbose.py
# Vebose Management
#
import sys
__all__ = ["message", "warning", "verbose", "fatal"]
verbosity = 0
def message(s):
sys.stdout.flush()
sys.stdout.write("%s\n" % s)
def warning(s):
sys.stdout.flush()
sys.stdout.write("Warning: %s\n" % s)
def debug(s):
sys.stdout.write("Debug: %s\n" % s)
def verbose(s, level=0):
if verbosity >= level:
sys.stdout.flush()
sys.stdout.write("%s\n" % s)
def fatal(s, ret=1):
sys.stdout.flush()
sys.stderr.write("Fatal: %s\n" % s)
sys.exit(ret)
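
# Typical use (illustrative, not part of the original module): set the module-level
# verbosity once, then gate messages with verbose().
#
#     import verbose
#     verbose.verbosity = 2
#     verbose.verbose("shown because verbosity >= 1", level=1)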
| gpl-3.0 | -6,130,123,023,139,589,000 | 29.020408 | 77 | 0.614548 | false |
visionegg/visionegg | demo/demo_arrow.py | 1 | 1662 | #!/usr/bin/env python
"""Demo for the arrow stimulus."""
# Author(s): Hubertus Becker <[email protected]>
# Copyright: (C) 2004-2005 by Hertie Institute for Clinical Brain Research,
# Department of Cognitive Neurology, University of Tuebingen
# URL: http://www.hubertus-becker.de/resources/visionegg/
# $Revision$ $Date$
############################
# Import various modules #
############################
import VisionEgg
VisionEgg.start_default_logging()
VisionEgg.watch_exceptions()
from VisionEgg.Core import *
from VisionEgg.FlowControl import Presentation
from VisionEgg.MoreStimuli import *
#####################################
# Initialize OpenGL window/screen #
#####################################
screen = get_default_screen()
screen.parameters.bgcolor = (0.0,0.0,0.0,0.0) # Make it black (RGBA)
######################################
# Arrow stimulus #
######################################
stimulus = Arrow(
anchor = 'center',
position = (screen.size[0]/2.0, screen.size[1]/2.0),
size = (64.0, 16.0),
color = (1.0, 1.0, 1.0, 1.0), # Draw it in white (RGBA)
orientation = 0.0 # Right
)
###############################################################
# Create viewport - intermediary between stimuli and screen #
###############################################################
viewport = Viewport( screen=screen, stimuli=[stimulus] )
########################################
# Create presentation object and go! #
########################################
p = Presentation(go_duration=(5.0, 'seconds'), viewports=[viewport])
p.go()
| lgpl-2.1 | 4,718,557,124,976,209,000 | 29.777778 | 75 | 0.501805 | false |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/tests/python/unittest/test_numpy_ndarray.py | 1 | 50604 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import absolute_import
from __future__ import division
import itertools
import os
import unittest
import numpy as _np
import mxnet as mx
from mxnet import np, npx, autograd
from mxnet.gluon import HybridBlock
from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, retry, use_np
from common import with_seed, TemporaryDirectory
from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_exception, is_op_runnable, collapse_sum_like
from mxnet.ndarray.ndarray import py_slice
from mxnet.base import integer_types
import scipy.stats as ss
@with_seed()
@use_np
def test_np_empty():
# (input dtype, expected output dtype)
dtype_pairs = [
(np.int8, np.int8),
(np.int32, np.int32),
(np.float16, np.float16),
(np.float32, np.float32),
(np.float64, np.float64),
(np.bool_, np.bool_),
(np.bool, np.bool_),
('int8', np.int8),
('int32', np.int32),
('float16', np.float16),
('float32', np.float32),
('float64', np.float64),
('bool', np.bool_),
(None, np.float32),
]
orders = ['C', 'F', 'A']
shapes = [
(),
0,
(0,),
(0, 0),
2,
(2,),
(3, 0),
(4, 5),
(1, 1, 1, 1),
]
ctxes = [npx.current_context(), None]
for dtype, expected_dtype in dtype_pairs:
for shape in shapes:
for order in orders:
for ctx in ctxes:
if order == 'C':
ret = np.empty(shape, dtype, order, ctx)
assert ret.dtype == expected_dtype
assert ret.shape == shape if isinstance(shape, tuple) else (shape,)
assert ret.ctx == npx.current_context()
else:
assert_exception(np.empty, NotImplementedError, shape, dtype, order, ctx)
@with_seed()
@use_np
def test_np_array_creation():
dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, _np.bool, _np.bool_,
'int8', 'int32', 'float16', 'float32', 'float64', 'bool', None]
objects = [
[],
(),
[[1, 2], [3, 4]],
_np.random.randint(-10, 10, size=rand_shape_nd(3)),
_np.random.uniform(size=rand_shape_nd(3)),
_np.random.uniform(size=(3, 0, 4))
]
for dtype in dtypes:
for src in objects:
mx_arr = np.array(src, dtype=dtype)
assert mx_arr.ctx == mx.current_context()
if dtype is None:
dtype = src.dtype if isinstance(src, _np.ndarray) else _np.float32
if isinstance(src, mx.nd.NDArray):
np_arr = _np.array(src.asnumpy(), dtype=dtype)
else:
np_arr = _np.array(src, dtype=dtype)
assert mx_arr.dtype == np_arr.dtype
assert same(mx_arr.asnumpy(), np_arr)
@with_seed()
@use_np
def test_np_zeros():
# test np.zeros in Gluon
class TestZeros(HybridBlock):
def __init__(self, shape, dtype=None):
super(TestZeros, self).__init__()
self._shape = shape
self._dtype = dtype
def hybrid_forward(self, F, x, *args, **kwargs):
return x + F.np.zeros(shape, dtype)
class TestZerosOutputType(HybridBlock):
def hybrid_forward(self, F, x, *args, **kwargs):
return x, F.np.zeros(shape=())
# test np.zeros in imperative
def check_zero_array_creation(shape, dtype):
np_out = _np.zeros(shape=shape, dtype=dtype)
mx_out = np.zeros(shape=shape, dtype=dtype)
assert same(mx_out.asnumpy(), np_out)
if dtype is None:
assert mx_out.dtype == _np.float32
assert np_out.dtype == _np.float64
else:
assert mx_out.dtype == np_out.dtype
shapes = [(0,), (2, 0, 2), (0, 0, 0, 0), ()]
shapes += [rand_shape_nd(ndim, allow_zero_size=True) for ndim in range(5)]
dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, None]
for shape in shapes:
for dtype in dtypes:
check_zero_array_creation(shape, dtype)
x = np.array(_np.random.uniform(size=shape), dtype=dtype)
if dtype is None:
x = x.astype('float32')
for hybridize in [True, False]:
test_zeros = TestZeros(shape, dtype)
test_zeros_output_type = TestZerosOutputType()
if hybridize:
test_zeros.hybridize()
test_zeros_output_type.hybridize()
y = test_zeros(x)
assert type(y) == np.ndarray
assert same(x.asnumpy(), y.asnumpy())
y = test_zeros_output_type(x)
assert type(y[1]) == np.ndarray
for shape in shapes:
        for dtype in [_np.bool, bool, _np.bool_, 'bool']:
check_zero_array_creation(shape, dtype)
@with_seed()
@use_np
def test_np_ones():
# test np.ones in Gluon
class TestOnes(HybridBlock):
def __init__(self, shape, dtype=None):
super(TestOnes, self).__init__()
self._shape = shape
self._dtype = dtype
def hybrid_forward(self, F, x, *args, **kwargs):
return x * F.np.ones(shape, dtype)
class TestOnesOutputType(HybridBlock):
def hybrid_forward(self, F, x, *args, **kwargs):
return x, F.np.ones(shape=())
# test np.ones in imperative
def check_ones_array_creation(shape, dtype):
np_out = _np.ones(shape=shape, dtype=dtype)
mx_out = np.ones(shape=shape, dtype=dtype)
assert same(mx_out.asnumpy(), np_out)
if dtype is None:
assert mx_out.dtype == _np.float32
assert np_out.dtype == _np.float64
else:
assert mx_out.dtype == np_out.dtype
shapes = [(0,), (2, 0, 2), (0, 0, 0, 0), ()]
shapes += [rand_shape_nd(ndim, allow_zero_size=True) for ndim in range(5)]
dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, None]
for shape in shapes:
for dtype in dtypes:
check_ones_array_creation(shape, dtype)
x = mx.nd.array(_np.random.uniform(size=shape), dtype=dtype).as_np_ndarray()
if dtype is None:
x = x.astype('float32')
for hybridize in [True, False]:
test_ones = TestOnes(shape, dtype)
test_ones_output_type = TestOnesOutputType()
if hybridize:
test_ones.hybridize()
test_ones_output_type.hybridize()
y = test_ones(x)
assert type(y) == np.ndarray
assert same(x.asnumpy(), y.asnumpy())
y = test_ones_output_type(x)
assert type(y[1]) == np.ndarray
for shape in shapes:
        for dtype in [_np.bool, bool, _np.bool_, 'bool']:
check_ones_array_creation(shape, dtype)
@with_seed()
@use_np
def test_identity():
class TestIdentity(HybridBlock):
        def __init__(self, n, dtype=None):
super(TestIdentity, self).__init__()
self._n = n
self._dtype = dtype
def hybrid_forward(self, F, x):
return x * F.np.identity(self._n, self._dtype)
class TestIdentityOutputType(HybridBlock):
def hybrid_forward(self, F, x):
return x, F.np.identity(0)
    def check_identity_array_creation(n, dtype):
np_out = _np.identity(n=n, dtype=dtype)
mx_out = np.identity(n=n, dtype=dtype)
assert same(mx_out.asnumpy(), np_out)
if dtype is None:
assert mx_out.dtype == _np.float32
assert np_out.dtype == _np.float64
ns = [0, 1, 2, 3, 5, 15, 30, 200]
dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, None]
for n in ns:
for dtype in dtypes:
check_identity_array_creation(n, dtype)
x = mx.nd.array(_np.random.uniform(size=(n, n)), dtype=dtype).as_np_ndarray()
if dtype is None:
x = x.astype('float32')
for hybridize in [True, False]:
test_identity = TestIdentity(n, dtype)
test_identity_output_type = TestIdentityOutputType()
if hybridize:
test_identity.hybridize()
test_identity_output_type.hybridize()
y = test_identity(x)
assert type(y) == np.ndarray
assert same(x.asnumpy() * _np.identity(n, dtype), y.asnumpy())
y = test_identity_output_type(x)
assert type(y[1]) == np.ndarray
@with_seed()
def test_np_ndarray_binary_element_wise_ops():
np_op_map = {
'+': _np.add,
'*': _np.multiply,
'-': _np.subtract,
'/': _np.divide,
'mod': _np.mod,
'pow': _np.power,
}
if is_op_runnable():
np_op_map.update({
'==': _np.equal,
'!=': _np.not_equal,
'>': _np.greater,
'>=': _np.greater_equal,
'<': _np.less,
'<=': _np.less_equal
})
def _get_grad_func(op, scalar=None, reverse=False):
if op == '+':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
collapse_sum_like(ograd, x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd
else:
return lambda ograd, x1, x2, out: ograd
elif op == '-':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
-collapse_sum_like(ograd, x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd
else:
return lambda ograd, x1, x2, out: -ograd
elif op == '*':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd * x2, x1.shape),
collapse_sum_like(ograd * x1, x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd * x2
else:
return lambda ograd, x1, x2, out: ograd * x1
elif op == '/':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd / x2, x1.shape),
collapse_sum_like(-x1 * ograd / (x2 * x2), x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd / x2
else:
return lambda ograd, x1, x2, out: -x1 * ograd / (x2 * x2)
elif op == 'mod':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd, x1.shape),
collapse_sum_like(-ograd * _np.floor(x1 / x2), x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd
else:
return lambda ograd, x1, x2, out: -ograd * _np.floor(x1 / x2)
elif op == 'pow':
if scalar is None:
return lambda ograd, x1, x2, out: (collapse_sum_like(ograd * x2 * _np.power(x1, x2 - 1), x1.shape),
collapse_sum_like(ograd * out * _np.log(x1), x2.shape))
elif not reverse:
return lambda ograd, x1, x2, out: ograd * x2 * _np.power(x1, x2 - 1)
else:
return lambda ograd, x1, x2, out: ograd * out * _np.log(x1)
elif op in ('==', '!=', '<', '<=', '>', '>='):
if scalar is None:
return lambda ograd, x1, x2, out: (_np.zeros_like(x1), _np.zeros_like(x2))
else:
return lambda ograd, x1, x2, out: _np.zeros_like(ograd)
return None
def get_np_ret(x1, x2, op):
return np_op_map[op](x1, x2)
@use_np
class TestBinaryElementWiseOp(HybridBlock):
def __init__(self, op, scalar=None, reverse=False):
super(TestBinaryElementWiseOp, self).__init__()
self._op = op
self._scalar = scalar
self._reverse = reverse # if false, scalar is the right operand.
def hybrid_forward(self, F, x, *args):
if self._op == '+':
if self._scalar is not None:
return x + self._scalar if not self._reverse else self._scalar + x
else:
return x + args[0] if not self._reverse else args[0] + x
elif self._op == '*':
if self._scalar is not None:
return x * self._scalar if not self._reverse else self._scalar * x
else:
return x * args[0] if not self._reverse else args[0] * x
elif self._op == '-':
if self._scalar is not None:
return x - self._scalar if not self._reverse else self._scalar - x
else:
return x - args[0] if not self._reverse else args[0] - x
elif self._op == '/':
if self._scalar is not None:
return x / self._scalar if not self._reverse else self._scalar / x
else:
return x / args[0] if not self._reverse else args[0] / x
elif self._op == 'mod':
if self._scalar is not None:
return x % self._scalar if not self._reverse else self._scalar % x
else:
return x % args[0] if not self._reverse else args[0] % x
elif self._op == 'pow':
if self._scalar is not None:
return x ** self._scalar if not self._reverse else self._scalar ** x
else:
return x ** args[0] if not self._reverse else args[0] ** x
elif self._op == '>':
if self._scalar is not None:
return x > self._scalar if not self._reverse else self._scalar > x
else:
return x > args[0]
elif self._op == '>=':
if self._scalar is not None:
return x >= self._scalar if not self._reverse else self._scalar >= x
else:
return x >= args[0]
elif self._op == '<':
if self._scalar is not None:
return x < self._scalar if not self._reverse else self._scalar < x
else:
return x < args[0]
elif self._op == '<=':
if self._scalar is not None:
return x <= self._scalar if not self._reverse else self._scalar <= x
else:
return x <= args[0]
elif self._op == '==':
if self._scalar is not None:
return x == self._scalar if not self._reverse else self._scalar == x
else:
return x == args[0]
elif self._op == '!=':
if self._scalar is not None:
return x != self._scalar if not self._reverse else self._scalar != x
else:
return x != args[0]
else:
print(self._op)
assert False
logic_ops = ['==', '!=', '>', '<', '>=', '<=']
@use_np
def check_binary_op_result(shape1, shape2, op, dtype=None):
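        # A shape of None stands for a Python scalar operand. Inputs are
        # shifted to be strictly positive (abs() + 1) so pow, mod and division
        # gradients stay well-defined; outputs and gradients are then compared
        # against the NumPy reference in both hybridized and imperative modes.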
if shape1 is None:
mx_input1 = abs(_np.random.uniform()) + 1
np_input1 = mx_input1
else:
mx_input1 = (rand_ndarray(shape1, dtype=dtype).abs() + 1).as_np_ndarray()
mx_input1.attach_grad()
np_input1 = mx_input1.asnumpy()
if shape2 is None:
mx_input2 = abs(_np.random.uniform()) + 1
np_input2 = mx_input2
else:
mx_input2 = (rand_ndarray(shape2, dtype=dtype).abs() + 1).as_np_ndarray()
mx_input2.attach_grad()
np_input2 = mx_input2.asnumpy()
scalar = None
reverse = False
if isinstance(mx_input1, mx.nd.NDArray) and not isinstance(mx_input2, mx.nd.NDArray):
scalar = mx_input2
reverse = False
elif isinstance(mx_input2, mx.nd.NDArray) and not isinstance(mx_input1, mx.nd.NDArray):
scalar = mx_input1
reverse = True
grad_func = _get_grad_func(op, scalar, reverse)
np_out = get_np_ret(np_input1, np_input2, op)
ograd = _np.ones_like(np_out)
for hybridize in [True, False]:
if scalar is None:
get_mx_ret_np = TestBinaryElementWiseOp(op)
get_mx_ret_classic = TestBinaryElementWiseOp(op)
if hybridize:
get_mx_ret_np.hybridize()
get_mx_ret_classic.hybridize()
if grad_func is None:
mx_out = get_mx_ret_np(mx_input1, mx_input2)
else:
with mx.autograd.record():
mx_out = get_mx_ret_np(mx_input1, mx_input2)
mx_out.backward()
assert type(mx_out) == np.ndarray
if op in logic_ops:
assert np_out.dtype == mx_out.dtype
assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-6, rtol=1e-5, use_broadcast=False)
if grad_func is not None:
x1_grad_expected, x2_grad_expected = grad_func(ograd, np_input1, np_input2, np_out)
assert_almost_equal(mx_input1.grad.asnumpy(), x1_grad_expected, atol=1e-5, rtol=1e-3,
use_broadcast=False)
assert_almost_equal(mx_input2.grad.asnumpy(), x2_grad_expected, atol=1e-5, rtol=1e-3,
use_broadcast=False)
else:
get_mx_ret = TestBinaryElementWiseOp(op, scalar=scalar, reverse=reverse)
if hybridize:
get_mx_ret.hybridize()
if reverse:
mx_input = mx_input2
else:
mx_input = mx_input1
if grad_func is None:
mx_out = get_mx_ret(mx_input)
else:
with mx.autograd.record():
mx_out = get_mx_ret(mx_input)
mx_out.backward()
assert type(mx_out) == np.ndarray
if op in logic_ops:
assert np_out.dtype == mx_out.dtype
assert_almost_equal(mx_out.asnumpy(), np_out, atol=1e-6, rtol=1e-5, use_broadcast=False)
# check grad
if grad_func is not None:
x_grad_expected = grad_func(ograd, np_input1, np_input2, np_out)
assert_almost_equal(mx_input.grad.asnumpy(), x_grad_expected, atol=1e-5, rtol=1e-3,
use_broadcast=False)
dtypes = [_np.float32, _np.float64, None]
ops = np_op_map.keys()
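    # The shape pairs below cover equal shapes, scalar operands (None),
    # one-sided and mutual broadcasting, scalars vs. arrays, and zero-size
    # arrays.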
for dtype in dtypes:
for op in ops:
check_binary_op_result((3, 4), (3, 4), op, dtype)
check_binary_op_result(None, (3, 4), op, dtype)
check_binary_op_result((3, 4), None, op, dtype)
check_binary_op_result((1, 4), (3, 1), op, dtype)
check_binary_op_result(None, (3, 1), op, dtype)
check_binary_op_result((1, 4), None, op, dtype)
check_binary_op_result((1, 4), (3, 5, 4), op, dtype)
check_binary_op_result((), (3, 5, 4), op, dtype)
check_binary_op_result((), None, op, dtype)
check_binary_op_result(None, (), op, dtype)
check_binary_op_result((0, 2), (1, 1), op, dtype)
check_binary_op_result((0, 2), None, op, dtype)
check_binary_op_result(None, (0, 2), op, dtype)
@with_seed()
def test_np_hybrid_block_multiple_outputs():
@use_np
class TestAllNumpyOutputs(HybridBlock):
def hybrid_forward(self, F, x, *args, **kwargs):
return F.np.add(x, x), F.np.multiply(x, x)
class TestAllClassicOutputs(HybridBlock):
def hybrid_forward(self, F, x, *args, **kwargs):
return x.as_nd_ndarray() + x.as_nd_ndarray(), x.as_nd_ndarray() * x.as_nd_ndarray()
data_np = np.ones((2, 3))
for block, expected_out_type in [(TestAllClassicOutputs, mx.nd.NDArray),
(TestAllNumpyOutputs, np.ndarray)]:
net = block()
for hybridize in [True, False]:
if hybridize:
net.hybridize()
out1, out2 = net(data_np)
assert type(out1) is expected_out_type
assert type(out2) is expected_out_type
@use_np
class TestMixedTypeOutputsFailure(HybridBlock):
def hybrid_forward(self, F, x, *args, **kwargs):
return x.as_nd_ndarray() + x.as_nd_ndarray(), F.np.multiply(x, x)
net = TestMixedTypeOutputsFailure()
assert_exception(net, TypeError, data_np)
net.hybridize()
assert_exception(net, TypeError, data_np)
@with_seed()
@use_np
def test_np_grad_ndarray_type():
data = np.array(2, dtype=_np.float32)
data.attach_grad()
assert type(data.grad) == np.ndarray
assert type(data.detach()) == np.ndarray
@with_seed()
@use_np
def test_np_ndarray_astype():
class TestAstype(HybridBlock):
def __init__(self, dtype, copy):
super(TestAstype, self).__init__()
self._dtype = dtype
self._copy = copy
def hybrid_forward(self, F, x):
return x.astype(dtype=self._dtype, copy=self._copy)
    def check_astype_equal(itype, otype, copy, hybridize=False):
        # Zero-copy (the result aliasing the input buffer) is only expected
        # when copy is False and the dtype is unchanged, so the flag is
        # derived here rather than passed in.
        expect_zero_copy = copy is False and itype == otype
mx_data = np.array([2, 3, 4, 5], dtype=itype)
np_data = mx_data.asnumpy()
test_astype = TestAstype(otype, copy)
if hybridize:
test_astype.hybridize()
mx_ret = test_astype(mx_data)
assert type(mx_ret) is np.ndarray
np_ret = np_data.astype(dtype=otype, copy=copy)
assert mx_ret.dtype == np_ret.dtype
assert same(mx_ret.asnumpy(), np_ret)
if expect_zero_copy and not hybridize:
assert id(mx_ret) == id(mx_data)
assert id(np_ret) == id(np_data)
dtypes = [np.int8, np.uint8, np.int32, np.float16, np.float32, np.float64, np.bool, np.bool_,
'int8', 'uint8', 'int32', 'float16', 'float32', 'float64', 'bool']
for itype, otype in itertools.product(dtypes, dtypes):
for copy in [True, False]:
for hybridize in [True, False]:
                check_astype_equal(itype, otype, copy, hybridize=hybridize)
@with_seed()
def test_np_ndarray_copy():
mx_data = np.array([2, 3, 4, 5], dtype=_np.int32)
assert_exception(mx_data.copy, NotImplementedError, order='F')
mx_ret = mx_data.copy()
np_ret = mx_data.asnumpy().copy()
assert same(mx_ret.asnumpy(), np_ret)
@with_seed()
@use_np
def test_np_ndarray_indexing():
def np_int(index, int_type=np.int32):
"""
Helper function for testing indexing that converts slices to slices of ints or None, and tuples to
tuples of ints or None.
"""
def convert(num):
if num is None:
return num
else:
return int_type(num)
if isinstance(index, slice):
return slice(convert(index.start), convert(index.stop), convert(index.step))
elif isinstance(index, tuple): # tuple of slices and integers
ret = []
for elem in index:
if isinstance(elem, slice):
ret.append(slice(convert(elem.start), convert(elem.stop), convert(elem.step)))
else:
ret.append(convert(elem))
return tuple(ret)
else:
assert False
# Copied from test_ndarray.py. Under construction.
def test_getitem(np_array, index):
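        # Normalize any mxnet ndarray indices to native numpy ones, index the
        # reference numpy array and the mx.np array with equivalent indices,
        # and compare the results both inside and outside an autograd scope.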
np_index = index
if type(index) == mx.nd.NDArray: # use of NDArray is prohibited
assert False
if isinstance(index, np.ndarray):
np_index = index.asnumpy()
if isinstance(index, tuple):
np_index = tuple([
idx.asnumpy() if isinstance(idx, mx.nd.NDArray) else idx
for idx in index]
)
np_indexed_array = np_array[np_index]
mx_np_array = np.array(np_array, dtype=np_array.dtype)
for autograd in [True, False]:
try:
if autograd:
with mx.autograd.record():
mx_indexed_array = mx_np_array[index]
else:
mx_indexed_array = mx_np_array[index]
except Exception as e:
print('Failed with index = {}'.format(index))
raise e
mx_indexed_array = mx_indexed_array.asnumpy()
assert same(np_indexed_array, mx_indexed_array), 'Failed with index = {}'.format(index)
def test_setitem(np_array, index):
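        # Verifies __setitem__ against native numpy for values given as numpy
        # arrays, mx.np arrays, Python lists and plain scalars, with and
        # without broadcasting of the value shape.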
def assert_same(np_array, np_index, mx_array, mx_index, mx_value, np_value=None):
if np_value is not None:
np_array[np_index] = np_value
elif isinstance(mx_value, np.ndarray):
np_array[np_index] = mx_value.asnumpy()
else:
np_array[np_index] = mx_value
try:
mx_array[mx_index] = mx_value
except Exception as e:
print('Failed with index = {}, value.shape = {}'.format(mx_index, mx_value.shape))
raise e
assert same(np_array, mx_array.asnumpy())
def _is_basic_index(index):
if isinstance(index, (integer_types, py_slice)):
return True
if isinstance(index, tuple) and all(isinstance(i, (integer_types, py_slice)) for i in index):
return True
return False
np_index = index # keep this native numpy type
if isinstance(index, np.ndarray):
np_index = index.asnumpy()
if isinstance(index, tuple):
np_index = []
for idx in index:
if isinstance(idx, np.ndarray):
np_index.append(idx.asnumpy())
else:
np_index.append(idx)
np_index = tuple(np_index)
mx_array = np.array(np_array, dtype=np_array.dtype) # mxnet.np.ndarray
np_array = mx_array.asnumpy() # native numpy array
indexed_array_shape = np_array[np_index].shape
np_indexed_array = _np.random.randint(low=-10000, high=0, size=indexed_array_shape)
# test value is a native numpy array without broadcast
assert_same(np_array, np_index, mx_array, index, np_indexed_array)
# test value is a list without broadcast
assert_same(np_array, np_index, mx_array, index, np_indexed_array.tolist())
# test value is a mxnet numpy array without broadcast
assert_same(np_array, np_index, mx_array, index, np.array(np_indexed_array))
# test value is an numeric_type
assert_same(np_array, np_index, mx_array, index, _np.random.randint(low=-10000, high=0))
np_value = _np.random.randint(low=-10000, high=0,
size=(indexed_array_shape[-1],) if len(indexed_array_shape) > 0 else ())
# test mxnet ndarray with broadcast
assert_same(np_array, np_index, mx_array, index, np.array(np_value))
# test native numpy array with broadcast
assert_same(np_array, np_index, mx_array, index, np_value)
# test python list with broadcast
assert_same(np_array, np_index, mx_array, index, np_value.tolist())
            # test values whose shapes are expanded to more dimensions than the indexed array's shape
# this is currently only supported in basic indexing
if _is_basic_index(index):
expanded_value_shape = (1, 1) + np_value.shape
assert_same(np_array, np_index, mx_array, index, np.array(np_value.reshape(expanded_value_shape)))
assert_same(np_array, np_index, mx_array, index, np_value.reshape(expanded_value_shape))
if len(expanded_value_shape) <= np_array[index].ndim:
                # NumPy does not allow value.ndim > np_array[index].ndim when value is a Python list;
                # this may be a NumPy bug.
assert_same(np_array, np_index, mx_array, index, np_value.reshape(expanded_value_shape).tolist())
# test list with broadcast
assert_same(np_array, np_index, mx_array, index,
[_np.random.randint(low=-10000, high=0)] * indexed_array_shape[-1] if len(indexed_array_shape) > 0
else _np.random.randint(low=-10000, high=0))
def test_getitem_autograd(np_array, index):
"""
np_array: native numpy array.
"""
x = np.array(np_array, dtype=np_array.dtype)
x.attach_grad()
with mx.autograd.record():
y = x[index]
y.backward()
value = np.ones_like(y)
x_grad = np.zeros_like(x)
x_grad[index] = value
assert same(x_grad.asnumpy(), x.grad.asnumpy())
def test_setitem_autograd(np_array, index):
"""
np_array: native numpy array.
"""
x = np.array(np_array, dtype=np_array.dtype)
out_shape = x[index].shape
y = np.array(_np.random.uniform(size=out_shape))
y.attach_grad()
try:
with mx.autograd.record():
x[index] = y
x.backward()
y_grad = np.ones_like(y)
assert same(y_grad.asnumpy(), y.grad.asnumpy())
except mx.base.MXNetError as err:
assert str(err).find('Inplace operations (+=, -=, x[:]=, etc) are not supported when recording with') != -1
shape = (8, 16, 9, 9)
np_array = _np.arange(_np.prod(_np.array(shape)), dtype='int32').reshape(shape) # native np array
# Test sliced output being ndarray:
index_list = [
(),
# Basic indexing
# Single int as index
0,
np.int32(0),
np.int64(0),
5,
np.int32(5),
np.int64(5),
-1,
np.int32(-1),
np.int64(-1),
# Slicing as index
slice(5),
np_int(slice(5), np.int32),
np_int(slice(5), np.int64),
slice(1, 5),
np_int(slice(1, 5), np.int32),
np_int(slice(1, 5), np.int64),
slice(1, 5, 2),
slice(1, 2, 2),
np_int(slice(1, 5, 2), np.int32),
np_int(slice(1, 5, 2), np.int64),
slice(7, 0, -1),
np_int(slice(7, 0, -1)),
np_int(slice(7, 0, -1), np.int64),
slice(None, 6),
np_int(slice(None, 6)),
np_int(slice(None, 6), np.int64),
slice(None, 6, 3),
np_int(slice(None, 6, 3)),
np_int(slice(None, 6, 3), np.int64),
slice(1, None),
np_int(slice(1, None)),
np_int(slice(1, None), np.int64),
slice(1, None, 3),
np_int(slice(1, None, 3)),
np_int(slice(1, None, 3), np.int64),
slice(None, None, 2),
np_int(slice(None, None, 2)),
np_int(slice(None, None, 2), np.int64),
slice(None, None, -1),
np_int(slice(None, None, -1)),
np_int(slice(None, None, -1), np.int64),
slice(None, None, -2),
np_int(slice(None, None, -2), np.int32),
np_int(slice(None, None, -2), np.int64),
# Multiple ints as indices
(1, 2, 3),
np_int((1, 2, 3)),
np_int((1, 2, 3), np.int64),
(-1, -2, -3),
np_int((-1, -2, -3)),
np_int((-1, -2, -3), np.int64),
(1, 2, 3, 4),
np_int((1, 2, 3, 4)),
np_int((1, 2, 3, 4), np.int64),
(-4, -3, -2, -1),
np_int((-4, -3, -2, -1)),
np_int((-4, -3, -2, -1), np.int64),
# slice(None) as indices
(slice(None), slice(None), 1, 8),
(slice(None), slice(None), -1, 8),
(slice(None), slice(None), 1, -8),
(slice(None), slice(None), -1, -8),
np_int((slice(None), slice(None), 1, 8)),
np_int((slice(None), slice(None), 1, 8), np.int64),
(slice(None), slice(None), 1, 8),
np_int((slice(None), slice(None), -1, -8)),
np_int((slice(None), slice(None), -1, -8), np.int64),
(slice(None), 2, slice(1, 5), 1),
np_int((slice(None), 2, slice(1, 5), 1)),
np_int((slice(None), 2, slice(1, 5), 1), np.int64),
# Mixture of ints and slices as indices
(slice(None, None, -1), 2, slice(1, 5), 1),
np_int((slice(None, None, -1), 2, slice(1, 5), 1)),
np_int((slice(None, None, -1), 2, slice(1, 5), 1), np.int64),
(slice(None, None, -1), 2, slice(1, 7, 2), 1),
np_int((slice(None, None, -1), 2, slice(1, 7, 2), 1)),
np_int((slice(None, None, -1), 2, slice(1, 7, 2), 1), np.int64),
(slice(1, 8, 2), slice(14, 2, -2), slice(3, 8), slice(0, 7, 3)),
np_int((slice(1, 8, 2), slice(14, 2, -2), slice(3, 8), slice(0, 7, 3))),
np_int((slice(1, 8, 2), slice(14, 2, -2), slice(3, 8), slice(0, 7, 3)), np.int64),
(slice(1, 8, 2), 1, slice(3, 8), 2),
np_int((slice(1, 8, 2), 1, slice(3, 8), 2)),
np_int((slice(1, 8, 2), 1, slice(3, 8), 2), np.int64),
# Test Ellipsis ('...')
(1, Ellipsis, -1),
(slice(2), Ellipsis, None, 0),
# Test newaxis
None,
(1, None, -2, 3, -4),
(1, slice(2, 5), None),
(slice(None), slice(1, 4), None, slice(2, 3)),
(slice(1, 3), slice(1, 3), slice(1, 3), slice(1, 3), None),
(slice(1, 3), slice(1, 3), None, slice(1, 3), slice(1, 3)),
(None, slice(1, 2), 3, None),
(1, None, 2, 3, None, None, 4),
# Advanced indexing
([1, 2], slice(3, 5), None, None, [3, 4]),
(slice(None), slice(3, 5), None, None, [2, 3], [3, 4]),
(slice(None), slice(3, 5), None, [2, 3], None, [3, 4]),
(None, slice(None), slice(3, 5), [2, 3], None, [3, 4]),
[1],
[1, 2],
[2, 1, 3],
[7, 5, 0, 3, 6, 2, 1],
np.array([6, 3], dtype=np.int32),
np.array([[3, 4], [0, 6]], dtype=np.int32),
np.array([[7, 3], [2, 6], [0, 5], [4, 1]], dtype=np.int32),
np.array([[7, 3], [2, 6], [0, 5], [4, 1]], dtype=np.int64),
np.array([[2], [0], [1]], dtype=np.int32),
np.array([[2], [0], [1]], dtype=np.int64),
np.array([4, 7], dtype=np.int32),
np.array([4, 7], dtype=np.int64),
np.array([[3, 6], [2, 1]], dtype=np.int32),
np.array([[3, 6], [2, 1]], dtype=np.int64),
np.array([[7, 3], [2, 6], [0, 5], [4, 1]], dtype=np.int32),
np.array([[7, 3], [2, 6], [0, 5], [4, 1]], dtype=np.int64),
(1, [2, 3]),
(1, [2, 3], np.array([[3], [0]], dtype=np.int32)),
(1, [2, 3]),
(1, [2, 3], np.array([[3], [0]], dtype=np.int64)),
(1, [2], np.array([[5], [3]], dtype=np.int32), slice(None)),
(1, [2], np.array([[5], [3]], dtype=np.int64), slice(None)),
(1, [2, 3], np.array([[6], [0]], dtype=np.int32), slice(2, 5)),
(1, [2, 3], np.array([[6], [0]], dtype=np.int64), slice(2, 5)),
(1, [2, 3], np.array([[4], [7]], dtype=np.int32), slice(2, 5, 2)),
(1, [2, 3], np.array([[4], [7]], dtype=np.int64), slice(2, 5, 2)),
(1, [2], np.array([[3]], dtype=np.int32), slice(None, None, -1)),
(1, [2], np.array([[3]], dtype=np.int64), slice(None, None, -1)),
(1, [2], np.array([[3]], dtype=np.int32), np.array([[5, 7], [2, 4]], dtype=np.int64)),
(1, [2], np.array([[4]], dtype=np.int32), np.array([[1, 3], [5, 7]], dtype='int64')),
[0],
[0, 1],
[1, 2, 3],
[2, 0, 5, 6],
([1, 1], [2, 3]),
([1], [4], [5]),
([1], [4], [5], [6]),
([[1]], [[2]]),
([[1]], [[2]], [[3]], [[4]]),
(slice(0, 2), [[1], [6]], slice(0, 2), slice(0, 5, 2)),
([[[[1]]]], [[1]], slice(0, 3), [1, 5]),
([[[[1]]]], 3, slice(0, 3), [1, 3]),
([[[[1]]]], 3, slice(0, 3), 0),
([[[[1]]]], [[2], [12]], slice(0, 3), slice(None)),
([1, 2], slice(3, 5), [2, 3], [3, 4]),
([1, 2], slice(3, 5), (2, 3), [3, 4]),
range(4),
range(3, 0, -1),
(range(4,), [1]),
(1, 1, slice(None), 1),
(1, 1, slice(None, 3), 1),
(1, 1, slice(None, 8, 3), 1),
]
for index in index_list:
test_getitem(np_array, index)
test_setitem(np_array, index)
test_getitem_autograd(np_array, index)
test_setitem_autograd(np_array, index)
# Test indexing to zero-size tensors
index_list = [
(slice(0, 0), slice(0, 0), 1, 2),
(slice(0, 0), slice(0, 0), slice(0, 0), slice(0, 0)),
]
for index in index_list:
test_getitem(np_array, index)
test_setitem(np_array, index)
test_getitem_autograd(np_array, index)
test_setitem_autograd(np_array, index)
    # test getitem and setitem on zero-size tensors
shapes_indices = [
((0), [slice(None, None, None)]),
((3, 0), [2, (slice(None, None, None)), (slice(None, None, None), None)]),
]
for shape, indices in shapes_indices:
np_array = _np.zeros(shape)
for index in indices:
test_getitem(np_array, index)
test_setitem(np_array, index)
test_getitem_autograd(np_array, index)
test_setitem_autograd(np_array, index)
@with_seed()
@use_np
def test_np_save_load_ndarrays():
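    # Round-trips single ndarrays, lists of ndarrays and str->ndarray dicts
    # through npx.save/npx.load, including zero-size and zero-dim shapes.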
shapes = [(2, 0, 1), (0,), (), (), (0, 4), (), (3, 0, 0, 0), (2, 1), (0, 5, 0), (4, 5, 6), (0, 0, 0)]
array_list = [_np.random.randint(0, 10, size=shape) for shape in shapes]
array_list = [np.array(arr, dtype=arr.dtype) for arr in array_list]
# test save/load single ndarray
for i, arr in enumerate(array_list):
with TemporaryDirectory() as work_dir:
fname = os.path.join(work_dir, 'dataset.npy')
npx.save(fname, arr)
arr_loaded = npx.load(fname)
assert isinstance(arr_loaded, list)
assert len(arr_loaded) == 1
assert _np.array_equal(arr_loaded[0].asnumpy(), array_list[i].asnumpy())
# test save/load a list of ndarrays
with TemporaryDirectory() as work_dir:
fname = os.path.join(work_dir, 'dataset.npy')
npx.save(fname, array_list)
        array_list_loaded = npx.load(fname)
        assert isinstance(array_list_loaded, list)
        assert len(array_list) == len(array_list_loaded)
        assert all(isinstance(arr, np.ndarray) for arr in array_list_loaded)
for a1, a2 in zip(array_list, array_list_loaded):
assert _np.array_equal(a1.asnumpy(), a2.asnumpy())
# test save/load a dict of str->ndarray
arr_dict = {}
keys = [str(i) for i in range(len(array_list))]
for k, v in zip(keys, array_list):
arr_dict[k] = v
with TemporaryDirectory() as work_dir:
fname = os.path.join(work_dir, 'dataset.npy')
npx.save(fname, arr_dict)
arr_dict_loaded = npx.load(fname)
assert isinstance(arr_dict_loaded, dict)
assert len(arr_dict_loaded) == len(arr_dict)
for k, v in arr_dict_loaded.items():
assert k in arr_dict
assert _np.array_equal(v.asnumpy(), arr_dict[k].asnumpy())
@retry(5)
@with_seed()
@use_np
def test_np_uniform():
types = [None, "float32", "float64"]
ctx = mx.context.current_context()
samples = 1000000
# Generation test
trials = 8
num_buckets = 5
for dtype in types:
for low, high in [(-100.0, -98.0), (99.0, 101.0)]:
scale = high - low
buckets, probs = gen_buckets_probs_with_ppf(lambda x: ss.uniform.ppf(x, loc=low, scale=scale), num_buckets)
buckets = np.array(buckets, dtype=dtype).tolist()
probs = [(buckets[i][1] - buckets[i][0])/scale for i in range(num_buckets)]
generator_mx_np = lambda x: mx.np.random.uniform(low, high, size=x, ctx=ctx, dtype=dtype).asnumpy()
verify_generator(generator=generator_mx_np, buckets=buckets, probs=probs, nsamples=samples, nrepeat=trials)
# Broadcasting test
params = [
(1.0, mx.np.ones((4,4)) + 2.0),
(mx.np.zeros((4,4)) + 1, 2.0),
(mx.np.zeros((1,4)), mx.np.ones((4,4)) + mx.np.array([1, 2, 3, 4])),
(mx.np.array([1, 2, 3, 4]), mx.np.ones((2,4,4)) * 5)
]
for dtype in types:
for low, high in params:
expect_mean = (low + high) / 2
expanded_size = (samples,) + expect_mean.shape
uniform_samples = mx.np.random.uniform(low, high, size=expanded_size, dtype=dtype)
mx.test_utils.assert_almost_equal(uniform_samples.asnumpy().mean(0), expect_mean.asnumpy(), rtol=0.20, atol=1e-1)
@retry(5)
@with_seed()
@use_np
def test_np_multinomial():
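    # Draws multinomial samples and checks that empirical frequencies match
    # pvals within a loose tolerance, for both list and mx.np.ndarray pvals,
    # with and without an explicit size, plus zero-size and empty-pvals
    # corner cases.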
pvals_list = [[0.0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0.0]]
sizes = [None, (), (3,), (2, 5, 7), (4, 9)]
    experiments = 10000
for pvals_mx_np_array in [False, True]:
for have_size in [False, True]:
for pvals in pvals_list:
if pvals_mx_np_array:
pvals = mx.np.array(pvals)
if have_size:
for size in sizes:
                        freq = mx.np.random.multinomial(experiments, pvals, size=size).asnumpy() / _np.float32(experiments)
# for those cases that didn't need reshape
if size in [None, ()]:
if type(pvals) == np.ndarray:
mx.test_utils.assert_almost_equal(freq, pvals.asnumpy(), rtol=0.20, atol=1e-1)
else:
mx.test_utils.assert_almost_equal(freq, pvals, rtol=0.20, atol=1e-1)
else:
# check the shape
                            assert freq.shape == size + (len(pvals),), 'freq.shape={}, size + (len(pvals),)={}'.format(freq.shape, size + (len(pvals),))
freq = freq.reshape((-1, len(pvals)))
# check the value for each row
for i in range(freq.shape[0]):
if type(pvals) == np.ndarray:
mx.test_utils.assert_almost_equal(freq[i, :], pvals.asnumpy(), rtol=0.20, atol=1e-1)
else:
mx.test_utils.assert_almost_equal(freq[i, :], pvals, rtol=0.20, atol=1e-1)
else:
                    freq = mx.np.random.multinomial(experiments, pvals).asnumpy() / _np.float32(experiments)
if type(pvals) == np.ndarray:
mx.test_utils.assert_almost_equal(freq, pvals.asnumpy(), rtol=0.20, atol=1e-1)
else:
mx.test_utils.assert_almost_equal(freq, pvals, rtol=0.20, atol=1e-1)
# check the zero dimension
sizes = [(0), (0, 2), (4, 0, 2), (3, 0, 1, 2, 0)]
for pvals_mx_np_array in [False, True]:
for pvals in pvals_list:
for size in sizes:
if pvals_mx_np_array:
pvals = mx.np.array(pvals)
                freq = mx.np.random.multinomial(experiments, pvals, size=size).asnumpy()
assert freq.size == 0
# check [] as pvals
for pvals_mx_np_array in [False, True]:
for pvals in [[], ()]:
if pvals_mx_np_array:
pvals = mx.np.array(pvals)
            freq = mx.np.random.multinomial(experiments, pvals).asnumpy()
assert freq.size == 0
for size in sizes:
                freq = mx.np.random.multinomial(experiments, pvals, size=size).asnumpy()
assert freq.size == 0
# test small experiment for github issue
# https://github.com/apache/incubator-mxnet/issues/15383
small_exp, total_exp = 20, 10000
for pvals_mx_np_array in [False, True]:
for pvals in pvals_list:
if pvals_mx_np_array:
pvals = mx.np.array(pvals)
x = np.random.multinomial(small_exp, pvals)
for i in range(total_exp // small_exp):
x = x + np.random.multinomial(20, pvals)
freq = (x.asnumpy() / _np.float32(total_exp)).reshape((-1, len(pvals)))
for i in range(freq.shape[0]):
if type(pvals) == np.ndarray:
mx.test_utils.assert_almost_equal(freq[i, :], pvals.asnumpy(), rtol=0.20, atol=1e-1)
else:
mx.test_utils.assert_almost_equal(freq[i, :], pvals, rtol=0.20, atol=1e-1)
@with_seed()
@unittest.skipUnless(is_op_runnable(), "Comparison ops can only run on either CPU instances, or GPU instances with"
" compute capability >= 53 if MXNet is built with USE_TVM_OP=ON")
@use_np
def test_np_ndarray_boolean_indexing():
def test_single_bool_index():
# adapted from numpy's test_indexing.py
# Single boolean index
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=np.int32)
assert same(a[np.array(True, dtype=np.bool_)].asnumpy(), a[None].asnumpy())
assert same(a[np.array(False, dtype=np.bool_)].asnumpy(), a[None][0:0].asnumpy())
def test_boolean_catch_exception():
# adapted from numpy's test_indexing.py
arr = np.ones((5, 4, 3))
index = np.array([True], dtype=np.bool_)
assert_exception(arr.__getitem__, IndexError, index)
index = np.array([False] * 6, dtype=np.bool_)
assert_exception(arr.__getitem__, IndexError, index)
index = np.zeros((4, 4), dtype=bool)
assert_exception(arr.__getitem__, IndexError, index)
assert_exception(arr.__getitem__, TypeError, (slice(None), index))
def test_boolean_indexing_onedim():
# adapted from numpy's test_indexing.py
# Indexing a 2-dimensional array with
# boolean array of length one
a = np.array([[0., 0., 0.]])
b = np.array([True], dtype=bool)
assert same(a[b].asnumpy(), a.asnumpy())
def test_boolean_indexing_twodim():
# adapted from numpy's test_indexing.py
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=np.int32)
b = np.array([[ True, False, True],
[False, True, False],
[ True, False, True]], dtype=np.bool_)
assert same(a[b].asnumpy(), _np.array([1, 3, 5, 7, 9], dtype=a.dtype))
assert same(a[b[1]].asnumpy(), _np.array([[4, 5, 6]], dtype=a.dtype))
assert same(a[b[0]].asnumpy(), a[b[2]].asnumpy())
def test_boolean_indexing_list():
# adapted from numpy's test_indexing.py
a = np.array([1, 2, 3], dtype=np.int32)
b = [True, False, True]
# Two variants of the test because the first takes a fast path
assert same(a[b].asnumpy(), _np.array([1, 3], dtype=a.dtype))
        assert same(a[None, b].asnumpy(), _np.array([[1, 3]], dtype=a.dtype))
def test_boolean_indexing_autograd():
a = np.random.uniform(size=(3, 4, 5))
a.attach_grad()
with mx.autograd.record():
out_mx = a[a < 0.5]
out_mx.backward()
a_np = a.asnumpy()
out_np = a_np[a_np < 0.5]
assert_almost_equal(out_mx.asnumpy(), out_np, rtol=1e-4, atol=1e-5, use_broadcast=False)
a_grad_np = _np.zeros(a.shape, dtype=a.dtype)
a_grad_np[a_np < 0.5] = 1
assert_almost_equal(a.grad.asnumpy(), a_grad_np, rtol=1e-4, atol=1e-5, use_broadcast=False)
test_single_bool_index()
test_boolean_catch_exception()
test_boolean_indexing_onedim()
test_boolean_indexing_twodim()
test_boolean_indexing_list()
test_boolean_indexing_autograd()
@with_seed()
@use_np
def test_np_get_dtype():
dtypes = [_np.int8, _np.int32, _np.float16, _np.float32, _np.float64, _np.bool, _np.bool_,
'int8', 'int32', 'float16', 'float32', 'float64', 'bool', None]
objects = [
[],
(),
[[1, 2], [3, 4]],
_np.random.uniform(size=rand_shape_nd(3)),
_np.random.uniform(size=(3, 0, 4))
]
for dtype in dtypes:
for src in objects:
mx_arr = np.array(src, dtype=dtype)
assert mx_arr.ctx == mx.current_context()
if isinstance(src, mx.nd.NDArray):
np_arr = _np.array(src.asnumpy(), dtype=dtype if dtype is not None else _np.float32)
else:
np_arr = _np.array(src, dtype=dtype if dtype is not None else _np.float32)
assert type(mx_arr.dtype) == type(np_arr.dtype)
@use_np
def test_np_ndarray_pickle():
a = np.random.uniform(size=(4, 5))
a_copy = a.copy()
import pickle
with TemporaryDirectory() as work_dir:
fname = os.path.join(work_dir, 'np_ndarray_pickle_test_file')
with open(fname, 'wb') as f:
pickle.dump(a_copy, f)
with open(fname, 'rb') as f:
a_load = pickle.load(f)
    assert same(a.asnumpy(), a_load.asnumpy())
if __name__ == '__main__':
import nose
nose.runmodule()
| apache-2.0 | -9,156,711,259,813,719,000 | 40.242054 | 150 | 0.519366 | false |
Diiaablo95/friendsNet | test/services_api_test_group_requests.py | 1 | 10716 | import unittest
import json
import flask
import friendsNet.resources as resources
import friendsNet.database as database
DB_PATH = 'db/friendsNet_test.db'
ENGINE = database.Engine(DB_PATH)
COLLECTION_JSON = "application/vnd.collection+json"
HAL_JSON = "application/hal+json"
GROUP_MEMBERSHIP_REQUEST_PROFILE = "/profiles/request-profile"
#Tell Flask that I am running it in testing mode.
resources.app.config['TESTING'] = True
#Necessary for correct translation in url_for
resources.app.config['SERVER_NAME'] = 'localhost:5000'
#Database Engine utilized in our testing
resources.app.config.update({'Engine': ENGINE})
class ResourcesAPITestCase(unittest.TestCase):
#INITIATION AND TEARDOWN METHODS
@classmethod
def setUpClass(cls):
        '''Creates the database structure, removing any preexisting database file first.'''
print "Testing ", cls.__name__
ENGINE.remove_database()
ENGINE.create_tables()
@classmethod
def tearDownClass(cls):
'''Remove the testing database.'''
print "Testing ENDED for ", cls.__name__
ENGINE.remove_database()
def setUp(self):
'''Populates the database.'''
#This method loads the initial values from friendsNet_data_db.sql
ENGINE.populate_tables()
#Activate app_context for using url_for
self.app_context = resources.app.app_context()
self.app_context.push()
#Create a test client
self.client = resources.app.test_client()
def tearDown(self):
'''
Remove all records from database.
'''
ENGINE.clear()
self.app_context.pop()
class GroupRequestsTestCase(ResourcesAPITestCase):
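    # Expected Collection+JSON response bodies and POST request templates used
    # as fixtures by the test methods below.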
resp_get = {
"collection" : {
"version" : "1.0",
"href" : "/friendsNet/api/groups/2/requests/",
"links" : [
{"href" : "/friendsNet/api/groups/", "rel" : "groups", "prompt" : "Groups"},
{"href" : "/friendsNet/api/groups/2/", "rel" : "group", "prompt" : "Group"},
{"href" : "/friendsNet/api/groups/2/members/", "group members" : "group members", "prompt" : "Group members"},
{"href" : "/friendsNet/api/groups/2/statuses/", "rel" : "group statuses", "prompt" : "Group statuses"}
],
"items" : [
{
"href" : "/friendsNet/api/groups/2/requests/1/",
"data" : [
{"name" : "id", "value" : 2, "prompt" : "Group id"},
{"name" : "user_id", "value" : 1, "prompt" : "User id"}
],
"links" : [
{"href" : "/friendsNet/api/users/1/profile/", "rel" : "user profile", "prompt" : "User profile",},
{"href" : "/friendsNet/api/users/1/groups/", "rel" : "user memberships", "prompt" : "User memberships"}
]
},
{
"href" : "/friendsNet/api/groups/2/requests/4/",
"data" : [
{"name" : "id", "value" : 2, "prompt" : "Group id"},
{"name" : "user_id", "value" : 4, "prompt" : "User id"}
],
"links" : [
{"href" : "/friendsNet/api/users/4/profile/", "rel" : "user profile", "prompt" : "User profile"},
{"href" : "/friendsNet/api/users/4/groups/", "rel" : "user memberships", "prompt" : "User memberships"}
]
}
],
"template" : {
"data" : [
{"name" : "user_id", "value" : "", "prompt" : "User requesting id", "required" : "true"}
]
}
}
}
resp_get_empty = {
"collection" : {
"version" : "1.0",
"href" : "/friendsNet/api/groups/3/requests/",
"links" : [
{"href" : "/friendsNet/api/groups/", "rel" : "groups", "prompt" : "Groups"},
{"href" : "/friendsNet/api/groups/3/", "rel" : "group", "prompt" : "Group"},
{"href" : "/friendsNet/api/groups/3/members/", "group members" : "group members", "prompt" : "Group members"},
{"href" : "/friendsNet/api/groups/3/statuses/", "rel" : "group statuses", "prompt" : "Group statuses"}
],
"items" : [],
"template" : {
"data" : [
{"name" : "user_id", "value" : "", "prompt" : "User requesting id", "required" : "true"}
]
}
}
}
request_post_correct = {
"template" : {
"data" : [
{"name" : "user_id", "value" : 5}
]
}
}
request_post_wrong = {
"template" : {
"data" : [
{"name" : "user_id", "value" : "err"}
]
}
}
request_post_existing_request = {
"template" : {
"data" : [
{"name" : "user_id", "value" : 1}
]
}
}
request_post_existing_member = {
"template" : {
"data" : [
{"name" : "user_id", "value" : 3}
]
}
}
def setUp(self):
super(GroupRequestsTestCase, self).setUp()
        self.url = resources.api.url_for(resources.Group_requests, group_id = 2, _external = False)
self.url_empty = resources.api.url_for(resources.Group_requests, group_id = 3, _external = False)
self.url_wrong_privacy = resources.api.url_for(resources.Group_requests, group_id = 1, _external = False)
self.url_wrong = resources.api.url_for(resources.Group_requests, group_id = 999, _external = False)
def test_url(self):
#Checks that the URL points to the right resource
_url = '/friendsNet/api/groups/2/requests/'
print '('+self.test_url.__name__+')', self.test_url.__doc__
with resources.app.test_request_context(_url):
rule = flask.request.url_rule
view_point = resources.app.view_functions[rule.endpoint].view_class
self.assertEquals(view_point, resources.Group_requests)
def test_wrong_url(self):
resp = self.client.get(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
data = json.loads(resp.data)["collection"]
version = data["version"] #test VERSION
self.assertEquals(version, self.resp_get["collection"]["version"])
href = data["href"] #test HREF
self.assertEquals(href, self.url_wrong)
error = data["error"]
self.assertEquals(error["code"], 404)
#TEST GET
#200 + MIMETYPE & PROFILE
def test_get_requests(self):
print '('+self.test_get_requests.__name__+')', self.test_get_requests.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEquals(self.resp_get, data)
self.assertEqual(resp.headers.get("Content-Type", None), COLLECTION_JSON + ";profile=" + GROUP_MEMBERSHIP_REQUEST_PROFILE)
#EMPTY ITEMS
def test_get_empty_requests(self):
print '('+self.test_get_empty_requests.__name__+')', self.test_get_empty_requests.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url_empty, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEquals(self.resp_get_empty, data)
self.assertEqual(resp.headers.get("Content-Type", None), COLLECTION_JSON + ";profile=" + GROUP_MEMBERSHIP_REQUEST_PROFILE)
#404
def test_get_not_existing_group(self):
print '('+self.test_get_not_existing_group.__name__+')', self.test_get_not_existing_group.__doc__
with resources.app.test_client() as client:
resp = client.get(self.url_wrong, headers = {"Accept" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
#TEST POST
#201
def test_post_request(self):
print '('+self.test_post_request.__name__+')', self.test_post_request.__doc__
resp = self.client.post(self.url, data = json.dumps(self.request_post_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 201)
self.assertIn("Location", resp.headers)
new_url = resp.headers["Location"]
resp2 = self.client.get(new_url, headers = {"Accept" : HAL_JSON})
self.assertEquals(resp2.status_code, 200)
#400
def test_post_wrong_privacy_group(self):
print '('+self.test_post_wrong_privacy_group.__name__+')', self.test_post_wrong_privacy_group.__doc__
resp = self.client.post(self.url_wrong_privacy, data = json.dumps(self.request_post_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 400)
#404
def test_post_not_existing_group(self):
print '('+self.test_post_not_existing_group.__name__+')', self.test_post_not_existing_group.__doc__
resp = self.client.post(self.url_wrong, data = json.dumps(self.request_post_correct), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 404)
#409
#request existing for the user to the group
def test_post_request_existing_request(self):
print '('+self.test_post_request_existing_request.__name__+')', self.test_post_request_existing_request.__doc__
resp = self.client.post(self.url, data = json.dumps(self.request_post_existing_request), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 409)
#409
#user already member of the group
def test_post_request_existing_membership(self):
print '('+self.test_post_request_existing_membership.__name__+')', self.test_post_request_existing_membership.__doc__
resp = self.client.post(self.url, data = json.dumps(self.request_post_existing_member), headers = {"Content-Type" : COLLECTION_JSON})
self.assertEquals(resp.status_code, 409)
#415
def test_post_wrong_header_request(self):
print '('+self.test_post_wrong_header_request.__name__+')', self.test_post_wrong_header_request.__doc__
resp = self.client.post(self.url_wrong, data = json.dumps(self.request_post_correct))
self.assertEquals(resp.status_code, 415)
if __name__ == '__main__':
print 'Start running tests'
unittest.main() | gpl-3.0 | 1,362,417,376,660,279,800 | 40.863281 | 147 | 0.56467 | false |
bo0ts/cgal-testsuite-dockerfiles | cgal_docker_args.py | 1 | 4043 | import logging
import argparse
import sys
import shlex
from os import path
from socket import gethostname
from getpass import getuser
from multiprocessing import cpu_count
from xdg.BaseDirectory import load_first_config, xdg_config_home
class CustomArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(CustomArgumentParser, self).__init__(*args, **kwargs)
def convert_arg_line_to_args(self, line):
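        # Tokenize @argument-file lines shell-style so quoted values survive,
        # and let '#' start a comment inside the file.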
return shlex.split(line, comments=True)
def parser():
"""Return the command line argument parser for test_cgal"""
parser = CustomArgumentParser(
description='''This script launches docker containers which run the CGAL testsuite.''',
fromfile_prefix_chars='@')
# Testing related arguments
parser.add_argument('--images', nargs='*',
help='List of images to launch, defaults to all prefixed with cgal-testsuite')
parser.add_argument('--testsuite', metavar='/path/to/testsuite',
help='Absolute path where the release is going to be stored.',
default=path.abspath('./testsuite'))
parser.add_argument('--testresults', metavar='/path/to/testresults',
help='Absolute path where the testresults are going to be stored.',
default=path.abspath('./testresults'))
parser.add_argument('--packages', nargs='*',
                        help='List of package base names to run the tests for. Will always include Installation. '
                             'e.g. AABB_tree will run AABB_tree, AABB_tree_Examples, and AABB_tree_Demo.')
# Docker related arguments
parser.add_argument('--docker-url', metavar='protocol://hostname/to/docker.sock[:PORT]',
default='unix://var/run/docker.sock',
help='The protocol+hostname+port where the Docker server is hosted.')
parser.add_argument('--force-rm', action='store_true',
help='If a container with the same name already exists, force it to quit')
parser.add_argument('--max-cpus', metavar='N', default=cpu_count(), type=int,
help='The maximum number of CPUs the testsuite is allowed to use at a single time. Defaults to all available cpus.')
parser.add_argument('--container-cpus', metavar='N', default=1, type=int,
help='The number of CPUs a single container should have. Defaults to one.')
parser.add_argument('--jobs', metavar='N', default=None, type=int,
help='The number of jobs a single container is going to launch. Defaults to --container-cpus.')
parser.add_argument('--use-fedora-selinux-policy', action='store_true',
help='Mount volumes with z option to accomodate SELinux on Fedora.')
# Download related arguments
parser.add_argument('--use-local', action='store_true',
help='Use a local extracted CGAL release. --testsuite must be set to that release.')
# TODO make internal releases and latest public?
parser.add_argument('--user', help='Username for CGAL Members')
parser.add_argument('--passwd', help='Password for CGAL Members')
# Upload related arguments
parser.add_argument('--upload-results', action='store_true', help='Actually upload the test results.')
parser.add_argument('--tester', help='The tester', default=getuser())
parser.add_argument('--tester-name', help='The name of the tester', default=gethostname())
parser.add_argument('--tester-address', help='The mail address of the tester')
if load_first_config('CGAL'):
default_arg_file = path.join(load_first_config('CGAL'), 'test_cgal_rc')
else:
default_arg_file = path.join(xdg_config_home, 'test_cgal_rc')
if path.isfile(default_arg_file):
logging.info('Using default arguments from: {}'.format(default_arg_file))
sys.argv.insert(1, '@' + default_arg_file)
return parser
if __name__ == "__main__":
pass
| gpl-3.0 | -5,400,662,399,486,295,000 | 51.506494 | 140 | 0.64655 | false |