code | repo_name | path | language | license | size
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
from Headset import Headset
import logging
import time
puerto = 'COM3'
headset = Headset(logging.INFO)
try:
    headset.connect(puerto, 115200)
except Exception as e:
    raise e
print "Is connected? " + str(headset.isConnected())
print "-----------------------------------------"
headset.startReading(persist_data=True)
time.sleep(5)
headset.stopReading()
headset.closePort()
print "-----------------------------------------"
print "Is conected? " + str(headset.isConnected())
print headset.getStatus()
| emotrix/Emotrix | emotrix/HeadsetTester.py | Python | bsd-2-clause | 529 |
"""
==========================
RecoBundles80 using AFQ API
==========================
An example using the AFQ API to run recobundles with the
`80 bundle atlas <https://figshare.com/articles/Advanced_Atlas_of_80_Bundles_in_MNI_space/7375883>`_.
"""
import os.path as op
import plotly
from AFQ.api.group import GroupAFQ
import AFQ.data.fetch as afd
##########################################################################
# Get some example data
# ---------------------
#
# Retrieves `Stanford HARDI dataset <https://purl.stanford.edu/ng782rw8378>`_.
#
afd.organize_stanford_data(clear_previous_afq=True)
##########################################################################
# Set tractography parameters (optional)
# ---------------------------------------
# We create a ``tracking_params`` dictionary, which we will pass to the AFQ
# object, specifying that we want 50,000 seeds randomly distributed in the
# white matter.
#
# We only do this to make this example faster and consume less space.
tracking_params = dict(n_seeds=50000,
random_seeds=True,
rng_seed=42)
##########################################################################
# Initialize an AFQ object:
# -------------------------
#
# We specify seg_algo as reco80 in segmentation_params. This tells the AFQ
# object to perform RecoBundles using the 80 bundles atlas in the
# segmentation step.
myafq = GroupAFQ(bids_path=op.join(afd.afq_home,
'stanford_hardi'),
preproc_pipeline='vistasoft',
segmentation_params={"seg_algo": "reco80"},
tracking_params=tracking_params)
##########################################################################
# Visualizing bundles and tract profiles:
# ---------------------------------------
# Accessing ``myafq.all_bundles_figure`` runs the full pipeline and returns
# interactive plotly visualizations of the bundles; ``plotly.io.show`` should
# automatically open the figure in a new browser window.
bundle_html = myafq.all_bundles_figure
plotly.io.show(bundle_html["01"])
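##########################################################################
# Optionally (illustrative only, not part of the original example): assuming
# ``bundle_html["01"]`` is a plain plotly figure, it could also be written to
# an HTML file instead of being shown, e.g.
# ``plotly.io.write_html(bundle_html["01"], "bundles_sub-01.html")``
# (the output file name here is arbitrary).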
| arokem/pyAFQ | examples/plot_afq_reco80.py | Python | bsd-2-clause | 2,044 |
# WARPnet Client<->Server Architecture
# WARPnet Parameter Definitions
#
# Author: Siddharth Gupta
import struct, time
from warpnet_common_params import *
from warpnet_client_definitions import *
from twisted.internet import reactor
import binascii
# Struct IDs
STRUCTID_CONTROL = 0x13
STRUCTID_CONTROL_ACK = 0x14
STRUCTID_COMMAND = 0x17
STRUCTID_COMMAND_ACK = 0x18
STRUCTID_OBSERVE_BER = 0x24
STRUCTID_OBSERVE_BER_REQ = 0x25
STRUCTID_OBSERVE_PER = 0x26
STRUCTID_OBSERVE_PER_REQ = 0x27
# Command IDs
COMMANDID_STARTTRIAL = 0x40
COMMANDID_STOPTRIAL = 0x41
COMMANDID_RESET_PER = 0x50
COMMANDID_ENABLE_BER_TESTING = 0x51
COMMANDID_DISABLE_BER_TESTING = 0x52
########################
## Struct Definitions ##
########################
# ControlStruct is a ClientStruct that stores some basic parameters to pass to the WARP board.
# Its fields can be read directly, e.g. ControlStruct.txPower. The struct must also know how to
# convert its integer fields into a binary payload via the prepToSend function, which is given
# the nodeID.
# typedef struct {
# char structID;
# char nodeID;
# char txPower;
# char channel;
# char modOrderHeader;
# char modOrderPayload;
# short reserved;
# int pktGen_period;
# int pktGen_length;
# } warpnetControl;
class ControlStruct(ClientStruct):
txPower = -1
channel = -1
modOrderHeader = -1
modOrderPayload = -1
reserved = 0
packetGeneratorPeriod = 0
packetGeneratorLength = 0
def __init__(self):
self.structID = STRUCTID_CONTROL
self.txPower = 63
self.channel = 4
self.modOrderHeader = 0
self.modOrderPayload = 2
self.packetGeneratorPeriod = 0
self.packetGeneratorLength = 1300
self.expectedReturnStructID = STRUCTID_CONTROL_ACK
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!6BHII', self.structID, nodeID, self.txPower, self.channel, self.modOrderHeader, self.modOrderPayload, self.reserved, self.packetGeneratorPeriod, self.packetGeneratorLength)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!BBH', rawData[0:4])
#print "Control struct successfully applied at node %d" % dataTuple[1]
#CommandStruct is used to send commands or requests to the WARP nodes
# The cmdIDs are defined above
# Matching C code definition:
# typedef struct {
# char structID;
# char nodeID;
# char cmdID;
# char cmdParam;
# } warpnetCommand;
class CommandStruct(ClientStruct):
cmdID = -1
cmdParam = -1
def __init__(self, cmdID, cmdParam):
self.structID = STRUCTID_COMMAND
self.expectedReturnStructID = STRUCTID_COMMAND_ACK
self.cmdID = cmdID
self.cmdParam = cmdParam
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!4B', self.structID, nodeID, self.cmdID, self.cmdParam)
def updateFromNode(self, rawData, pcapts):
pass
#print "Successfully executed command %d" % self.cmdID
#ObservePERStruct collects packet error rate (PER) data from WARP nodes
# Matching C code definition:
# typedef struct {
# unsigned char structID;
# unsigned char nodeID;
# unsigned char reqNum;
# unsigned char reqType;
# unsigned int numPkts_tx;
# unsigned int numPkts_rx_good;
# unsigned int numPkts_rx_goodHdrBadPyld;
# unsigned int numPkts_rx_badHdr;
# } warpnetObservePER;
class ObservePERStruct(ClientStruct):
numPkts_tx = -1
numPkts_rx_good = -1
numPkts_rx_goodHdrBadPyld = -1
numPkts_rx_badHdr = -1
reqNum = -1
reqType = -1
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_OBSERVE_PER_REQ
self.expectedReturnStructID = STRUCTID_OBSERVE_PER
self.numPkts_tx = 0
self.numPkts_rx_good = 0
self.numPkts_rx_goodHdrBadPyld = 0
self.numPkts_rx_badHdr = 0
self.reqNum = 0
self.reqType = 0
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!4B', self.structID, nodeID, self.reqNum, self.reqType)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!2B 2B 4I', rawData[0:20])
self.reqNum = dataTuple[2]
self.reqType = dataTuple[3]
self.numPkts_tx = dataTuple[4]
self.numPkts_rx_good = dataTuple[5]
self.numPkts_rx_goodHdrBadPyld = dataTuple[6]
self.numPkts_rx_badHdr = dataTuple[7]
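# Illustrative only (not part of the original file): given an updated instance
# ``obs`` of ObservePERStruct, a packet-error rate could be computed as
#   per = 1.0 - (obs.numPkts_rx_good / float(obs.numPkts_tx)) if obs.numPkts_tx else 0.0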
#Client struct for collecting BER updates from the ber_processor program
# Matching C code struct:
# typedef struct {
# unsigned char structID;
# unsigned char nodeID;
# unsigned short sequenceNumber;
# unsigned char nodeID_tx;
# unsigned char nodeID_rx;
# unsigned short mac_seqNum;
# unsigned char mac_pktType;
# unsigned char reserved0;
# unsigned char reserved1;
# unsigned char reserved2;
# unsigned int bits_rx;
# unsigned int bits_errors;
# } warpnetObserveBER;
class ObserveBERStruct(ClientStruct):
totalBitsReceived = 0
totalBitErrors = 0
nodeID_tx = -1
nodeID_rx = -1
def __init__(self, logger=None):
ClientStruct.__init__(self, logger)
self.structID = STRUCTID_OBSERVE_BER_REQ
self.expectedReturnStructID = STRUCTID_OBSERVE_BER
self.totalBitsReceived = 0
self.totalBitErrors = 0
def prepToSend(self, nodeID):
self.updateDone = False
return struct.pack('!BBH', self.structID, nodeID, 0)
def updateFromNode(self, rawData, pcapts):
dataTuple = struct.unpack('!2B H 2B H 2I', rawData[0:16])
self.nodeID_tx = dataTuple[3]
self.nodeID_rx = dataTuple[4]
self.totalBitsReceived += dataTuple[6]
self.totalBitErrors += dataTuple[7]
def clearBitCounts(self):
self.totalBitsReceived = 0
self.totalBitErrors = 0
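# Illustrative only (not part of the original file): given an updated instance
# ``obs`` of ObserveBERStruct, a bit-error rate could be computed as
#   ber = obs.totalBitErrors / float(obs.totalBitsReceived) if obs.totalBitsReceived else 0.0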
| shailcoolboy/Warp-Trinity | ResearchApps/Measurement/examples/TxPower_vs_BER/warpnet_experiment_structs.py | Python | bsd-2-clause | 5,510 |
# coding: utf-8
from __future__ import unicode_literals
import unittest
import io
from lxml import isoschematron, etree
from packtools.catalogs import SCHEMAS
SCH = etree.parse(SCHEMAS['sps-1.3'])
def TestPhase(phase_name, cache):
"""Factory of parsed Schematron phases.
:param phase_name: the phase name
:param cache: mapping type
"""
if phase_name not in cache:
phase = isoschematron.Schematron(SCH, phase=phase_name)
cache[phase_name] = phase
return cache[phase_name]
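# Illustrative only (not part of the original module): the cache ensures each
# Schematron phase is compiled once per test run, e.g.
#   cache = {}
#   phase = TestPhase('phase.journal-id', cache)
#   assert TestPhase('phase.journal-id', cache) is phase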
class PhaseBasedTestCase(unittest.TestCase):
cache = {}
def _run_validation(self, sample):
schematron = TestPhase(self.sch_phase, self.cache)
return schematron.validate(etree.parse(sample))
class JournalIdTests(PhaseBasedTestCase):
"""Tests for article/front/journal-meta/journal-id elements.
Ticket #14 makes @journal-id-type="publisher-id" mandatory.
Ref: https://github.com/scieloorg/scielo_publishing_schema/issues/14
"""
sch_phase = 'phase.journal-id'
def test_case1(self):
"""
presence(@nlm-ta) is True
presence(@publisher-id) is True
"""
sample = u"""<article>
<front>
<journal-meta>
<journal-id journal-id-type="nlm-ta">
Rev Saude Publica
</journal-id>
<journal-id journal-id-type="publisher-id">
RSP
</journal-id>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case2(self):
"""
presence(@nlm-ta) is True
presence(@publisher-id) is False
"""
sample = u"""<article>
<front>
<journal-meta>
<journal-id journal-id-type="nlm-ta">
Rev Saude Publica
</journal-id>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_case3(self):
"""
presence(@nlm-ta) is False
presence(@publisher-id) is True
"""
sample = u"""<article>
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id">
RSP
</journal-id>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case4(self):
"""
presence(@nlm-ta) is False
presence(@publisher-id) is False
"""
sample = u"""<article>
<front>
<journal-meta>
<journal-id journal-id-type='doi'>
123.plin
</journal-id>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_publisher_id_cannot_be_empty(self):
sample = u"""<article>
<front>
<journal-meta>
<journal-id journal-id-type="publisher-id"></journal-id>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class JournalTitleGroupTests(PhaseBasedTestCase):
"""Tests for article/front/journal-meta/journal-title-group elements.
"""
sch_phase = 'phase.journal-title-group'
def test_journal_title_group_is_absent(self):
sample = u"""<article>
<front>
<journal-meta>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_case1(self):
"""
A: presence(journal-title) is True
B: presence(abbrev-journal-title[@abbrev-type='publisher']) is True
A ^ B is True
"""
sample = u"""<article>
<front>
<journal-meta>
<journal-title-group>
<journal-title>
Revista de Saude Publica
</journal-title>
<abbrev-journal-title abbrev-type='publisher'>
Rev. Saude Publica
</abbrev-journal-title>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case2(self):
"""
A: presence(journal-title) is True
B: presence(abbrev-journal-title[@abbrev-type='publisher']) is False
A ^ B is False
"""
sample = u"""<article>
<front>
<journal-meta>
<journal-title-group>
<journal-title>
Revista de Saude Publica
</journal-title>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_case3(self):
"""
A: presence(journal-title) is False
B: presence(abbrev-journal-title[@abbrev-type='publisher']) is True
A ^ B is False
"""
sample = u"""<article>
<front>
<journal-meta>
<journal-title-group>
<abbrev-journal-title abbrev-type='publisher'>
Rev. Saude Publica
</abbrev-journal-title>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_case4(self):
"""
A: presence(journal-title) is False
B: presence(abbrev-journal-title[@abbrev-type='publisher']) is False
A ^ B is False
"""
sample = u"""<article>
<front>
<journal-meta>
<journal-title-group>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_empty_journal_title(self):
sample = u"""<article>
<front>
<journal-meta>
<journal-title-group>
<journal-title></journal-title>
<abbrev-journal-title abbrev-type='publisher'>Rev. Saude Publica</abbrev-journal-title>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_empty_abbrev_journal_title(self):
sample = u"""<article>
<front>
<journal-meta>
<journal-title-group>
<journal-title>Revista de Saude Publica</journal-title>
<abbrev-journal-title abbrev-type='publisher'></abbrev-journal-title>
</journal-title-group>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class PublisherTests(PhaseBasedTestCase):
"""Tests for article/front/journal-meta/publisher elements.
"""
sch_phase = 'phase.publisher'
def test_publisher_is_present(self):
sample = u"""<article>
<front>
<journal-meta>
<publisher>
<publisher-name>British Medical Journal</publisher-name>
</publisher>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_publisher_is_absent(self):
sample = u"""<article>
<front>
<journal-meta>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_publisher_is_empty(self):
sample = u"""<article>
<front>
<journal-meta>
<publisher>
<publisher-name></publisher-name>
</publisher>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ArticleCategoriesTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/article-categories elements.
"""
sch_phase = 'phase.article-categories'
def test_article_categories_is_present(self):
sample = u"""<article>
<front>
<article-meta>
<article-categories>
<subj-group>
<subject>ISO/TC 108</subject>
<subject>
SC 2, Measurement and evaluation of...
</subject>
</subj-group>
</article-categories>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_article_categories_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class fpage_OR_elocationTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/fpage or elocation-id elements.
"""
sch_phase = 'phase.fpage_or_elocation-id'
def test_case1(self):
"""
fpage is True
elocation-id is True
fpage v elocation-id is True
"""
sample = u"""<article>
<front>
<article-meta>
<fpage>01</fpage>
<elocation-id>E27</elocation-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case2(self):
"""
fpage is True
elocation-id is False
fpage v elocation-id is True
"""
sample = u"""<article>
<front>
<article-meta>
<fpage>01</fpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case3(self):
"""
fpage is False
elocation-id is True
fpage v elocation-id is True
"""
sample = u"""<article>
<front>
<article-meta>
<elocation-id>E27</elocation-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case4(self):
"""
fpage is False
elocation-id is False
fpage v elocation-id is False
"""
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_empty_fpage(self):
sample = u"""<article>
<front>
<article-meta>
<fpage></fpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_empty_elocationid(self):
sample = u"""<article>
<front>
<article-meta>
<elocation-id></elocation-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ISSNTests(PhaseBasedTestCase):
"""Tests for article/front/journal-meta/issn elements.
"""
sch_phase = 'phase.issn'
def test_case1(self):
"""
A: @pub-type='epub' is True
B: @pub-type='ppub' is True
A v B is True
"""
sample = u"""<article>
<front>
<journal-meta>
<issn pub-type="epub">
0959-8138
</issn>
<issn pub-type="ppub">
0959-813X
</issn>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case2(self):
"""
A: @pub-type='epub' is True
B: @pub-type='ppub' is False
A v B is True
"""
sample = u"""<article>
<front>
<journal-meta>
<issn pub-type="epub">
0959-8138
</issn>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case3(self):
"""
A: @pub-type='epub' is False
B: @pub-type='ppub' is True
A v B is True
"""
sample = u"""<article>
<front>
<journal-meta>
<issn pub-type="ppub">
0959-813X
</issn>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case4(self):
"""
A: @pub-type='epub' is False
B: @pub-type='ppub' is False
A v B is False
"""
sample = u"""<article>
<front>
<journal-meta>
<issn>
0959-813X
</issn>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_empty_issn(self):
sample = u"""<article>
<front>
<journal-meta>
<issn pub-type="epub"></issn>
</journal-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ArticleIdTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/article-id elements.
"""
sch_phase = 'phase.article-id'
def test_article_id_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_pub_id_type_doi_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<article-id>
10.1590/1414-431X20143434
</article-id>
<article-id pub-id-type='other'>
10.1590/1414-431X20143435
</article-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_pub_id_type_doi(self):
sample = u"""<article>
<front>
<article-meta>
<article-id pub-id-type='doi'>
10.1590/1414-431X20143434
</article-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_pub_id_type_doi_is_empty(self):
sample = u"""<article>
<front>
<article-meta>
<article-id pub-id-type='doi'/>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_invalid_pub_id_type(self):
sample = u"""<article>
<front>
<article-meta>
<article-id pub-id-type='unknown'>
10.1590/1414-431X20143434
</article-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_invalid_pub_id_type_case2(self):
sample = u"""<article>
<front>
<article-meta>
<article-id pub-id-type='unknown'>
10.1590/1414-431X20143434
</article-id>
<article-id pub-id-type='doi'>
10.1590/1414-431X20143434
</article-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_valid_pub_id_type_values(self):
for typ in ['doi', 'publisher-id', 'other']:
sample = u"""<article>
<front>
<article-meta>
<article-id pub-id-type='%s'>
10.1590/1414-431X20143433
</article-id>
<article-id pub-id-type='doi'>
10.1590/1414-431X20143434
</article-id>
</article-meta>
</front>
</article>
""" % typ
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class SubjGroupTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/article-categories/subj-group elements.
"""
sch_phase = 'phase.subj-group'
def test_subj_group_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<article-categories>
</article-categories>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_without_heading_type(self):
sample = u"""<article>
<front>
<article-meta>
<article-categories>
<subj-group subj-group-type="kwd">
<subject content-type="neurosci">
Cellular and Molecular Biology
</subject>
<subj-group>
<subject content-type="neurosci">
Blood and brain barrier
</subject>
</subj-group>
</subj-group>
</article-categories>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_with_heading_type(self):
sample = u"""<article>
<front>
<article-meta>
<article-categories>
<subj-group subj-group-type="heading">
<subject>
Cellular and Molecular Biology
</subject>
<subj-group>
<subject content-type="neurosci">
Blood and brain barrier
</subject>
</subj-group>
</subj-group>
</article-categories>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_with_heading_in_subarticle_pt(self):
sample = u"""<article>
<front>
<article-meta>
<article-categories>
<subj-group subj-group-type="heading">
<subject>
Original Article
</subject>
<subj-group>
<subject content-type="neurosci">
Blood and brain barrier
</subject>
</subj-group>
</subj-group>
</article-categories>
</article-meta>
</front>
<sub-article xml:lang="pt" article-type="translation" id="S01">
<front-stub>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Artigos Originais</subject>
</subj-group>
</article-categories>
</front-stub>
</sub-article>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_with_many_heading_in_subarticle_pt(self):
sample = u"""<article>
<front>
<article-meta>
<article-categories>
<subj-group subj-group-type="heading">
<subject>
Original Article
</subject>
<subj-group>
<subject content-type="neurosci">
Blood and brain barrier
</subject>
</subj-group>
</subj-group>
</article-categories>
</article-meta>
</front>
<sub-article xml:lang="pt" article-type="translation" id="S01">
<front-stub>
<article-categories>
<subj-group subj-group-type="heading">
<subject>Artigos Originais</subject>
</subj-group>
<subj-group subj-group-type="heading">
<subject>Artigos Piratas</subject>
</subj-group>
</article-categories>
</front-stub>
</sub-article>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_with_heading_type_in_the_deep(self):
sample = u"""<article>
<front>
<article-meta>
<article-categories>
<subj-group>
<subject>
Cellular and Molecular Biology
</subject>
<subj-group subj-group-type="heading">
<subject>
Blood and brain barrier
</subject>
</subj-group>
</subj-group>
</article-categories>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_with_many_heading_type(self):
sample = u"""<article>
<front>
<article-meta>
<article-categories>
<subj-group subj-group-type="heading">
<subject>
Cellular and Molecular Biology
</subject>
</subj-group>
<subj-group subj-group-type="heading">
<subject>
Blood and brain barrier
</subject>
</subj-group>
</article-categories>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class AbstractLangTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/abstract elements.
"""
sch_phase = 'phase.abstract_lang'
def test_is_present(self):
sample = u"""<article>
<front>
<article-meta>
<abstract>
<p>Differing socioeconomic positions in...</p>
</abstract>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_is_present_with_lang(self):
sample = u"""<?xml version="1.0" encoding="UTF-8"?>
<article>
<front>
<article-meta>
<abstract xml:lang="en">
<p>Differing socioeconomic positions in...</p>
</abstract>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_for_research_article(self):
sample = u"""<?xml version="1.0" encoding="UTF-8"?>
<article article-type="research-article">
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_research_article(self):
sample = u"""<?xml version="1.0" encoding="UTF-8"?>
<article article-type="research-article">
<front>
<article-meta>
<abstract>
<p>Differing socioeconomic positions in...</p>
</abstract>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_research_article_only_with_transabstract(self):
sample = u"""<?xml version="1.0" encoding="UTF-8"?>
<article article-type="research-article">
<front>
<article-meta>
<trans-abstract xml:lang="en">
<p>Differing socioeconomic positions in...</p>
</trans-abstract>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_missing_for_review_article(self):
sample = u"""<?xml version="1.0" encoding="UTF-8"?>
<article article-type="review-article">
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_review_article(self):
sample = u"""<?xml version="1.0" encoding="UTF-8"?>
<article article-type="review-article">
<front>
<article-meta>
<abstract>
<p>Differing socioeconomic positions in...</p>
</abstract>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_review_article_only_with_transabstract(self):
sample = u"""<?xml version="1.0" encoding="UTF-8"?>
<article article-type="review-article">
<front>
<article-meta>
<trans-abstract xml:lang="en">
<p>Differing socioeconomic positions in...</p>
</trans-abstract>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class ArticleTitleLangTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/title-group/article-title elements.
"""
sch_phase = 'phase.article-title_lang'
def test_is_present(self):
sample = u"""<article>
<front>
<article-meta>
<title-group>
<article-title>
Systematic review of day hospital care...
</article-title>
</title-group>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_is_present_with_lang(self):
sample = u"""<article>
<front>
<article-meta>
<title-group>
<article-title xml:lang="en">
Systematic review of day hospital care...
</article-title>
</title-group>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_is_present_in_elementcitation(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aires</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Paz</surname>
<given-names>AA</given-names>
</name>
<name>
<surname>Perosa</surname>
<given-names>CT</given-names>
</name>
</person-group>
<article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
<source>Rev Gaucha Enferm</source>
<year>2009</year>
<volume>30</volume>
<issue>3</issue>
<fpage>192</fpage>
<lpage>199</lpage>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_is_present_in_elementcitation_with_lang(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aires</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Paz</surname>
<given-names>AA</given-names>
</name>
<name>
<surname>Perosa</surname>
<given-names>CT</given-names>
</name>
</person-group>
<article-title xml:lang="pt">Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
<source>Rev Gaucha Enferm</source>
<year>2009</year>
<volume>30</volume>
<issue>3</issue>
<fpage>192</fpage>
<lpage>199</lpage>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class KwdGroupLangTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/kwd-group elements.
"""
sch_phase = 'phase.kwd-group_lang'
def test_single_occurrence(self):
sample = u"""<article>
<front>
<article-meta>
<kwd-group>
<kwd>gene expression</kwd>
</kwd-group>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_many_occurrences(self):
sample = u"""<article>
<front>
<article-meta>
<kwd-group xml:lang="en">
<kwd>gene expression</kwd>
</kwd-group>
<kwd-group xml:lang="pt">
<kwd>expressao do gene</kwd>
</kwd-group>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_many_occurrences_without_lang(self):
sample = u"""<article>
<front>
<article-meta>
<kwd-group>
<kwd>gene expression</kwd>
</kwd-group>
<kwd-group>
<kwd>expressao do gene</kwd>
</kwd-group>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class AffContentTypeTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/contrib-group
- article/front/article-meta
"""
sch_phase = 'phase.aff_contenttypes'
def test_original_is_present(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_original_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution>
Grupo de ...
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_many_original(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<institution content-type="original">
Galera de ...
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_original_is_present_and_absent(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
</aff>
<aff>
<institution>
Grupo de ...
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_original_is_present_and_present(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
</aff>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_orgdiv1(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<institution content-type="orgdiv1">
Instituto de Matematica e Estatistica
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_orgdiv2(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<institution content-type="orgdiv2">
Instituto de Matematica e Estatistica
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_orgdiv3(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<institution content-type="orgdiv3">
Instituto de Matematica e Estatistica
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_normalized(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<institution content-type="normalized">
Instituto de Matematica e Estatistica
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_orgdiv4(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<institution content-type="orgdiv4">
Instituto de Matematica e Estatistica
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_orgname_inside_contrib_group(self):
sample = u"""<article>
<front>
<article-meta>
<contrib-group>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<institution content-type="orgname">
Instituto de Matematica e Estatistica
</institution>
</aff>
</contrib-group>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class CountsTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/counts elements.
"""
sch_phase = 'phase.counts'
def test_absent(self):
sample = u"""<article>
<front>
<article-meta>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_table_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<counts>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_ref_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<counts>
<table-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_fig_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<equation-count count="0"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_equation_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<fig-count count="0"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_page_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
</counts>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_tables(self):
sample = u"""<article>
<front>
<article-meta>
<counts>
<table-count count="1"/>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
<body>
<sec>
<p>
<table-wrap>
<table frame="hsides" rules="groups">
<colgroup width="25%"><col/><col/><col/><col/></colgroup>
<thead>
<tr>
<th style="font-weight:normal" align="left">Modelo</th>
<th style="font-weight:normal">Estrutura</th>
<th style="font-weight:normal">Processos</th>
<th style="font-weight:normal">Resultados</th>
</tr>
</thead>
<tbody>
<tr>
<td valign="top">SIPA<sup>1,2</sup></td>
<td valign="top">Urgência e hospitalar.</td>
<td valign="top">Realiza triagem para fragilidade.</td>
<td valign="top">Maior gasto comunitário, menor gasto.</td>
</tr>
</tbody>
</table>
</table-wrap>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_tables_as_graphic(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<counts>
<table-count count="1"/>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
<body>
<sec>
<p>
<table-wrap id="t01">
<graphic mimetype="image"
xlink:href="1414-431X-bjmbr-1414-431X20142875-gt001">
</graphic>
</table-wrap>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_ref(self):
sample = u"""<article>
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="1"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
<back>
<ref-list>
<title>REFERÊNCIAS</title>
<ref id="B1">
<label>1</label>
<mixed-citation>
Béland F, Bergman H, Lebel P, Clarfield AM, Tousignant P, ...
</mixed-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_fig(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<fig-count count="1"/>
<equation-count count="0"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
<body>
<sec>
<p>
<fig id="f01">
<label>Figura 1</label>
<caption>
<title>Modelo das cinco etapas da pesquisa translacional.</title>
</caption>
<graphic xlink:href="0034-8910-rsp-48-2-0347-gf01"/>
</fig>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_equation(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="1"/>
<page-count count="0"/>
</counts>
<fpage>0</fpage>
<lpage>0</lpage>
</article-meta>
</front>
<body>
<sec>
<disp-formula>
<tex-math id="M1">
</tex-math>
</disp-formula>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_page(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="11"/>
</counts>
<fpage>140</fpage>
<lpage>150</lpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_page_wrong_count(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="50"/>
</counts>
<fpage>140</fpage>
<lpage>150</lpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_non_digit_pages(self):
"""Non-digit page interval cannot be checked automatically.
"""
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="11"/>
</counts>
<fpage>A140</fpage>
<lpage>A150</lpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_elocationid_pages(self):
"""Electronic pagination cannot be checked automatically.
"""
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<counts>
<table-count count="0"/>
<ref-count count="0"/>
<fig-count count="0"/>
<equation-count count="0"/>
<page-count count="11"/>
</counts>
<elocation-id>A140</elocation-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class AuthorNotesTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/author-notes elements.
"""
sch_phase = 'phase.fn-group'
def test_allowed_fn_types(self):
for fn_type in ['author', 'con', 'conflict', 'corresp', 'current-aff',
'deceased', 'edited-by', 'equal', 'on-leave', 'participating-researchers',
'present-address', 'previously-at', 'study-group-members', 'other']:
sample = u"""<article>
<front>
<article-meta>
<author-notes>
<fn fn-type="%s">
<p>foobar</p>
</fn>
</author-notes>
</article-meta>
</front>
</article>
""" % fn_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_fn_types(self):
sample = u"""<article>
<front>
<article-meta>
<author-notes>
<fn fn-type="wtf">
<p>foobar</p>
</fn>
</author-notes>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class PubDateTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/pub-date elements.
"""
sch_phase = 'phase.pub-date'
def test_pub_type_absent(self):
sample = u"""<article>
<front>
<article-meta>
<pub-date>
<day>17</day>
<month>03</month>
<year>2014</year>
</pub-date>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_pub_type_allowed_values(self):
for pub_type in ['epub', 'epub-ppub', 'collection']:
sample = u"""<article>
<front>
<article-meta>
<pub-date pub-type="%s">
<day>17</day>
<month>03</month>
<year>2014</year>
</pub-date>
</article-meta>
</front>
</article>
""" % pub_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_pub_type_disallowed_value(self):
sample = u"""<article>
<front>
<article-meta>
<pub-date pub-type="wtf">
<day>17</day>
<month>03</month>
<year>2014</year>
</pub-date>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class VolumeTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/volume
- article/back/ref-list/ref/element-citation/volume
"""
sch_phase = 'phase.volume'
def test_absent_in_front(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_present_but_empty_in_front(self):
sample = u"""<article>
<front>
<article-meta>
<volume></volume>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_present_in_front(self):
sample = u"""<article>
<front>
<article-meta>
<volume>10</volume>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class IssueTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/issue
- article/back/ref-list/ref/element-citation/issue
"""
sch_phase = 'phase.issue'
def test_absent_in_front(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_present_but_empty_in_front(self):
sample = u"""<article>
<front>
<article-meta>
<issue></issue>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_present_in_front(self):
sample = u"""<article>
<front>
<article-meta>
<issue>10</issue>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class SupplementTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/supplement
"""
sch_phase = 'phase.supplement'
def test_absent(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_present(self):
sample = u"""<article>
<front>
<article-meta>
<supplement>Suppl 2</supplement>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ElocationIdTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/elocation-id
- article/back/ref-list/ref/element-citation/elocation-id
"""
sch_phase = 'phase.elocation-id'
def test_absent(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_with_fpage(self):
sample = u"""<article>
<front>
<article-meta>
<elocation-id>E27</elocation-id>
<fpage>12</fpage>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_without_fpage(self):
sample = u"""<article>
<front>
<article-meta>
<elocation-id>E27</elocation-id>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_absent_back(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_with_fpage_back(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<elocation-id>E27</elocation-id>
<fpage>12</fpage>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_without_fpage_back(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<elocation-id>E27</elocation-id>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_with_and_without_fpage_back(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<elocation-id>E27</elocation-id>
<fpage>12</fpage>
</element-citation>
</ref>
<ref>
<element-citation>
<elocation-id>E27</elocation-id>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class HistoryTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/history
"""
sch_phase = 'phase.history'
def test_absent(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_date_type_allowed_values(self):
for pub_type in ['received', 'accepted', 'rev-recd']:
sample = u"""<article>
<front>
<article-meta>
<history>
<date date-type="%s">
<day>17</day>
<month>03</month>
<year>2014</year>
</date>
</history>
</article-meta>
</front>
</article>
""" % pub_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_date_type_disallowed_values(self):
sample = u"""<article>
<front>
<article-meta>
<history>
<date date-type="invalid">
<day>17</day>
<month>03</month>
<year>2014</year>
</date>
</history>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_date_type_allowed_values_multi(self):
sample = u"""<article>
<front>
<article-meta>
<history>
<date date-type="received">
<day>17</day>
<month>03</month>
<year>2014</year>
</date>
<date date-type="accepted">
<day>17</day>
<month>03</month>
<year>2014</year>
</date>
</history>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class ProductTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/product
"""
sch_phase = 'phase.product'
def test_absent(self):
sample = u"""<article>
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_absent_allowed_types(self):
for art_type in ['book-review', 'product-review']:
sample = u"""<article article-type="%s">
<front>
<article-meta>
</article-meta>
</front>
</article>
""" % art_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_types(self):
for art_type in ['book-review', 'product-review']:
sample = u"""<article article-type="%s">
<front>
<article-meta>
<product product-type="book">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
""" % art_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_types(self):
sample = u"""<article article-type="research-article">
<front>
<article-meta>
<product product-type="book">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_no_type(self):
sample = u"""<article>
<front>
<article-meta>
<product product-type="book">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_product_type(self):
sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product>
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_allowed_product_types(self):
for prod_type in ['book', 'software', 'article', 'chapter', 'other']:
sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product product-type="%s">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
""" % prod_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_product_types(self):
sample = u"""<article article-type="book-review">
<front>
<article-meta>
<product product-type="invalid">
<person-group person-group-type="author">
<name>
<surname>Sobrenome do autor</surname>
<given-names>Prenomes do autor</given-names>
</name>
</person-group>
<source>Título do livro</source>
<year>Ano de publicação</year>
<publisher-name>Nome da casa publicadora/Editora</publisher-name>
<publisher-loc>Local de publicação</publisher-loc>
<page-count count="total de paginação do livro (opcional)"/>
<isbn>ISBN do livro, se houver</isbn>
<inline-graphic>1234-5678-rctb-45-05-690-gf01.tif</inline-graphic>
</product>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class SecTitleTests(PhaseBasedTestCase):
"""Tests for:
- article/body/sec/title
"""
sch_phase = 'phase.sectitle'
def test_absent(self):
sample = u"""<article>
<body>
<sec>
<p>Foo bar</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_has_title(self):
sample = u"""<article>
<body>
<sec>
<title>Introduction</title>
<p>Foo bar</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_has_empty_title(self):
sample = u"""<article>
<body>
<sec>
<title></title>
<p>Foo bar</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ParagraphTests(PhaseBasedTestCase):
"""Tests for //p
"""
sch_phase = 'phase.paragraph'
def test_sec_without_id(self):
sample = u"""<article>
<body>
<sec>
<title>Intro</title>
<p>Foo bar</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_sec_with_id(self):
sample = u"""<article>
<body>
<sec>
<title>Intro</title>
<p id="p01">Foo bar</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_body_without_id(self):
sample = u"""<article>
<body>
<p>Foo bar</p>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_body_with_id(self):
sample = u"""<article>
<body>
<p id="p01">Foo bar</p>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class XrefRidTests(PhaseBasedTestCase):
"""Tests for //xref[@rid]
"""
sch_phase = 'phase.rid_integrity'
def test_mismatching_rid(self):
sample = u"""<article>
<front>
<article-meta>
<contrib-group>
<contrib>
<xref ref-type="aff" rid="aff1">
<sup>I</sup>
</xref>
</contrib>
</contrib-group>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_matching_rid(self):
sample = u"""<article>
<front>
<article-meta>
<contrib-group>
<contrib>
<xref ref-type="aff" rid="aff1">
<sup>I</sup>
</xref>
</contrib>
</contrib-group>
<aff id="aff1">
<label>I</label>
<institution content-type="orgname">
Secretaria Municipal de Saude de Belo Horizonte
</institution>
<addr-line>
<named-content content-type="city">Belo Horizonte</named-content>
<named-content content-type="state">MG</named-content>
</addr-line>
<country>Brasil</country>
<institution content-type="original">
Secretaria Municipal de Saude de Belo Horizonte. Belo Horizonte, MG, Brasil
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_mismatching_reftype(self):
sample = u"""<article>
<body>
<sec>
<table-wrap id="t01">
</table-wrap>
</sec>
<sec>
<p>
<xref ref-type="aff" rid="t01">table 1</xref>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class XrefRefTypeTests(PhaseBasedTestCase):
"""Tests for //xref[@ref-type]
"""
sch_phase = 'phase.xref_reftype_integrity'
def test_allowed_ref_types(self):
for reftype in ['aff', 'app', 'author-notes', 'bibr', 'contrib',
'corresp', 'disp-formula', 'fig', 'fn', 'sec',
'supplementary-material', 'table', 'table-fn',
'boxed-text']:
sample = u"""<article>
<body>
<sec>
<p>
<xref ref-type="%s">foo</xref>
</p>
</sec>
</body>
</article>
""" % reftype
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_ref_types(self):
        for reftype in ['chem', 'kwd', 'list', 'other', 'plate',
                'scheme', 'statement']:
sample = u"""<article>
<body>
<sec>
<p>
<xref ref-type="%s">foo</xref>
</p>
</sec>
</body>
</article>
""" % reftype
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class CaptionTests(PhaseBasedTestCase):
"""Tests for //caption
"""
sch_phase = 'phase.caption'
def test_with_title(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<fig id="f03">
<label>Figura 3</label>
<caption>
<title>
Percentual de atividade mitocondrial.
</title>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
</fig>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_without_title(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<fig id="f03">
<label>Figura 3</label>
<caption>
<label>
Percentual de atividade mitocondrial.
</label>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
</fig>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_with_title_and_more(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<fig id="f03">
<label>Figura 3</label>
<caption>
<title>
Percentual de atividade mitocondrial.
</title>
<label>
Percentual de atividade mitocondrial.
</label>
</caption>
<graphic xlink:href="1234-5678-rctb-45-05-0110-gf01.tif"/>
</fig>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class LicenseTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/permissions/license element.
"""
sch_phase = 'phase.license'
def test_missing_permissions_elem(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_license(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
</permissions>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_allowed_license_type(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_license_type(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="closed-access"
xlink:href="http://creativecommons.org/licenses/by/4.0/">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_allowed_license_href(self):
allowed_licenses = [
'http://creativecommons.org/licenses/by-nc/4.0/',
'http://creativecommons.org/licenses/by-nc/3.0/',
'http://creativecommons.org/licenses/by/4.0/',
'http://creativecommons.org/licenses/by/3.0/',
'http://creativecommons.org/licenses/by-nc-nd/4.0/',
'http://creativecommons.org/licenses/by-nc-nd/3.0/',
'http://creativecommons.org/licenses/by/3.0/igo/',
'http://creativecommons.org/licenses/by-nc/3.0/igo/',
'http://creativecommons.org/licenses/by-nc-nd/3.0/igo/',
]
for license in allowed_licenses:
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="%s">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
""" % license
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_license_href_https_scheme(self):
allowed_licenses = [
'https://creativecommons.org/licenses/by-nc/4.0/',
'https://creativecommons.org/licenses/by-nc/3.0/',
'https://creativecommons.org/licenses/by/4.0/',
'https://creativecommons.org/licenses/by/3.0/',
'https://creativecommons.org/licenses/by-nc-nd/4.0/',
'https://creativecommons.org/licenses/by-nc-nd/3.0/',
'https://creativecommons.org/licenses/by/3.0/igo/',
'https://creativecommons.org/licenses/by-nc/3.0/igo/',
'https://creativecommons.org/licenses/by-nc-nd/3.0/igo/',
]
for license in allowed_licenses:
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="%s">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
""" % license
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_license_href(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="http://opensource.org/licenses/MIT">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_trailing_slash(self):
allowed_licenses = [
'https://creativecommons.org/licenses/by-nc/4.0',
]
for license in allowed_licenses:
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<front>
<article-meta>
<permissions>
<license license-type="open-access"
xlink:href="%s">
<license-p>
This is an open-access article distributed under the terms...
</license-p>
</license>
</permissions>
</article-meta>
</front>
</article>
""" % license
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class AckTests(PhaseBasedTestCase):
"""Tests for article/back/ack element.
"""
sch_phase = 'phase.ack'
def test_with_sec(self):
sample = u"""<article>
<back>
<ack>
<sec>
<p>Some</p>
</sec>
</ack>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_without_sec(self):
sample = u"""<article>
<back>
<ack>
<title>Acknowledgment</title>
<p>Some text</p>
</ack>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class ElementCitationTests(PhaseBasedTestCase):
"""Tests for article/back/ref-list/ref/element-citation element.
"""
sch_phase = 'phase.element-citation'
def test_with_name_outside_persongroup(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<name>Foo</name>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_with_name_inside_persongroup(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group>
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_with_etal_outside_persongroup(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<etal>Foo</etal>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_with_etal_inside_persongroup(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group>
<etal>Foo</etal>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_with_collab_outside_persongroup(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<collab>Foo</collab>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_with_collab_inside_persongroup(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group>
<collab>Foo</collab>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_publication_types(self):
for pub_type in ['journal', 'book', 'webpage', 'thesis', 'confproc',
'patent', 'software', 'database', 'legal-doc', 'newspaper',
'other', 'report']:
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="%s">
</element-citation>
</ref>
</ref-list>
</back>
</article>
""" % pub_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_publication_types(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="invalid">
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_outside_ref(self):
sample = u"""<article>
<body>
<sec>
<p>
<element-citation publication-type="journal">
<person-group>
<collab>Foo</collab>
</person-group>
</element-citation>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class PersonGroupTests(PhaseBasedTestCase):
"""Tests for
- article/back/ref-list/ref/element-citation/person-group
- article/front/article-meta/product/person-group
"""
sch_phase = 'phase.person-group'
def test_missing_type(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<person-group>
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_type_at_product(self):
sample = u"""<article>
<front>
<article-meta>
<product>
<person-group>
<name>Foo</name>
</person-group>
</product>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_with_type(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<person-group person-group-type="author">
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_types(self):
for group_type in ['author', 'compiler', 'editor', 'translator']:
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<person-group person-group-type="%s">
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
""" % group_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_type(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<person-group person-group-type="invalid">
<name>Foo</name>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_loose_text_below_element_citation_node(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group person-group-type="author">HERE
<collab>Foo</collab>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_loose_text_below_product_node(self):
sample = u"""<article>
<front>
<article-meta>
<product>
<person-group person-group-type="author">HERE
<collab>Foo</collab>
</person-group>
</product>
</article-meta>
</front>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<collab>Foo</collab>
</person-group>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class FNGroupTests(PhaseBasedTestCase):
"""Tests for article/back/fn-group/fn element.
"""
sch_phase = 'phase.fn-group'
def test_allowed_fn_types(self):
for fn_type in ['abbr', 'com', 'financial-disclosure', 'supported-by',
'presented-at', 'supplementary-material', 'other']:
sample = u"""<article>
<back>
<fn-group>
<fn fn-type="%s">
<p>foobar</p>
</fn>
</fn-group>
</back>
</article>
""" % fn_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_fn_types(self):
sample = u"""<article>
<back>
<fn-group>
<fn fn-type="invalid">
<p>foobar</p>
</fn>
</fn-group>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class XHTMLTableTests(PhaseBasedTestCase):
"""Tests for //table elements.
"""
sch_phase = 'phase.xhtml-table'
def test_valid_toplevel(self):
for elem in ['caption', 'summary', 'col', 'colgroup', 'thead', 'tfoot', 'tbody']:
sample = u"""<article>
<body>
<sec>
<p>
<table>
<%s></%s>
</table>
</p>
</sec>
</body>
</article>
""" % (elem, elem)
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_invalid_toplevel(self):
for elem in ['tr']:
sample = u"""<article>
<body>
<sec>
<p>
<table>
<%s></%s>
</table>
</p>
</sec>
</body>
</article>
""" % (elem, elem)
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_tbody_upon_th(self):
sample = u"""<article>
<body>
<sec>
<p>
<table>
<tbody>
<tr>
<th>Foo</th>
</tr>
</tbody>
</table>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_thead_upon_th(self):
sample = u"""<article>
<body>
<sec>
<p>
<table>
<thead>
<tr>
<th>Foo</th>
</tr>
</thead>
</table>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_thead_upon_td(self):
sample = u"""<article>
<body>
<sec>
<p>
<table>
<thead>
<tr>
<td>Foo</td>
</tr>
</thead>
</table>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class SupplementaryMaterialMimetypeTests(PhaseBasedTestCase):
"""Tests for article//supplementary-material elements.
"""
sch_phase = 'phase.supplementary-material'
def test_case1(self):
"""mimetype is True
mime-subtype is True
           mimetype and mime-subtype is True
"""
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<supplementary-material id="S1"
xlink:title="local_file"
xlink:href="1471-2105-1-1-s1.pdf"
mimetype="application"
mime-subtype="pdf">
<label>Additional material</label>
<caption>
<p>Supplementary PDF file supplied by authors.</p>
</caption>
</supplementary-material>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_case2(self):
"""mimetype is True
mime-subtype is False
           mimetype and mime-subtype is False
"""
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<supplementary-material id="S1"
xlink:title="local_file"
xlink:href="1471-2105-1-1-s1.pdf"
mimetype="application">
<label>Additional material</label>
<caption>
<p>Supplementary PDF file supplied by authors.</p>
</caption>
</supplementary-material>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_case3(self):
"""mimetype is False
mime-subtype is True
           mimetype and mime-subtype is False
"""
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<supplementary-material id="S1"
xlink:title="local_file"
xlink:href="1471-2105-1-1-s1.pdf"
mime-subtype="pdf">
<label>Additional material</label>
<caption>
<p>Supplementary PDF file supplied by authors.</p>
</caption>
</supplementary-material>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_case4(self):
"""mimetype is False
mime-subtype is False
           mimetype and mime-subtype is False
"""
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<supplementary-material id="S1"
xlink:title="local_file"
xlink:href="1471-2105-1-1-s1.pdf">
<label>Additional material</label>
<caption>
<p>Supplementary PDF file supplied by authors.</p>
</caption>
</supplementary-material>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class AuthorNotesFNTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/author-notes/fn element.
"""
sch_phase = 'phase.fn-group'
def test_allowed_fn_types(self):
for fn_type in ['author', 'con', 'conflict', 'corresp', 'current-aff',
'deceased', 'edited-by', 'equal', 'on-leave',
'participating-researchers', 'present-address',
'previously-at', 'study-group-members', 'other',
'presented-at', 'presented-by']:
sample = u"""<article>
<front>
<article-meta>
<author-notes>
<fn fn-type="%s">
<p>foobar</p>
</fn>
</author-notes>
</article-meta>
</front>
</article>
""" % fn_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_fn_types(self):
sample = u"""<article>
<front>
<article-meta>
<author-notes>
<fn fn-type="invalid">
<p>foobar</p>
</fn>
</author-notes>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ArticleAttributesTests(PhaseBasedTestCase):
"""Tests for article element.
"""
sch_phase = 'phase.article-attrs'
def test_allowed_article_types(self):
for art_type in ['other', 'article-commentary', 'case-report',
'editorial', 'correction', 'letter', 'research-article',
'in-brief', 'review-article', 'book-review', 'retraction',
'brief-report', 'rapid-communication', 'reply', 'translation']:
sample = u"""<article article-type="%s" xml:lang="en" dtd-version="1.0" specific-use="sps-1.3">
</article>
""" % art_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_article_type(self):
sample = u"""<article article-type="invalid" dtd-version="1.0" specific-use="sps-1.3">
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_article_type(self):
sample = u"""<article xml:lang="en" dtd-version="1.0" specific-use="sps-1.3">
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_xmllang(self):
sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.3">
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_dtdversion(self):
sample = u"""<article article-type="research-article" xml:lang="en" specific-use="sps-1.3">
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_sps_version(self):
sample = u"""<article article-type="research-article" dtd-version="1.0" xml:lang="en">
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_invalid_sps_version(self):
sample = u"""<article article-type="research-article" dtd-version="1.0" xml:lang="en" specific-use="sps-1.0">
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class NamedContentTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/aff/addr-line/named-content elements.
"""
sch_phase = 'phase.named-content_attrs'
def test_missing_contenttype(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<addr-line>
<named-content>Foo</named-content>
</addr-line>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_allowed_contenttype(self):
for ctype in ['city', 'state']:
sample = u"""<article>
<front>
<article-meta>
<aff>
<addr-line>
<named-content content-type="%s">Foo</named-content>
</addr-line>
</aff>
</article-meta>
</front>
</article>
""" % ctype
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_contenttype(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<addr-line>
<named-content content-type="invalid">Foo</named-content>
</addr-line>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class MonthTests(PhaseBasedTestCase):
"""Tests for //month elements.
"""
sch_phase = 'phase.month'
def test_range_1_12(self):
for month in range(1, 13):
sample = u"""<article>
<front>
<article-meta>
<pub-date>
<month>%s</month>
</pub-date>
</article-meta>
</front>
</article>
""" % month
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_range_01_12(self):
for month in range(1, 13):
sample = u"""<article>
<front>
<article-meta>
<pub-date>
<month>%02d</month>
</pub-date>
</article-meta>
</front>
</article>
""" % month
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_out_of_range(self):
for month in [0, 13]:
sample = u"""<article>
<front>
<article-meta>
<pub-date>
<month>%s</month>
</pub-date>
</article-meta>
</front>
</article>
""" % month
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_must_be_integer(self):
sample = u"""<article>
<front>
<article-meta>
<pub-date>
<month>January</month>
</pub-date>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class SizeTests(PhaseBasedTestCase):
"""Tests for:
- article/front/article-meta/product/size
- article/back/ref-list/ref/element-citation/size
"""
sch_phase = 'phase.size'
def test_in_element_citation(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<size units="pages">2</size>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_in_product(self):
sample = u"""<article>
<front>
<article-meta>
<product>
<size units="pages">2</size>
</product>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_missing_units_in_product(self):
sample = u"""<article>
<front>
<article-meta>
<product>
<size>2</size>
</product>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_units_in_element_citation(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation>
<size>2</size>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_invalid_units_value(self):
sample = u"""<article>
<front>
<article-meta>
<product>
<size units="invalid">2</size>
</product>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ListTests(PhaseBasedTestCase):
"""Tests for list elements.
"""
sch_phase = 'phase.list'
def test_allowed_list_type(self):
for list_type in ['order', 'bullet', 'alpha-lower', 'alpha-upper',
'roman-lower', 'roman-upper', 'simple']:
sample = u"""<article>
<body>
<sec>
<p>
<list list-type="%s">
<title>Lista Númerica</title>
<list-item>
<p>Nullam gravida tellus eget condimentum egestas.</p>
</list-item>
<list-item>
<list list-type="%s">
<list-item>
<p>Curabitur luctus lorem ac feugiat pretium.</p>
</list-item>
</list>
</list-item>
<list-item>
<p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
</list-item>
</list>
</p>
</sec>
</body>
</article>
""" % (list_type, list_type)
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_list_type(self):
sample = u"""<article>
<body>
<sec>
<p>
<list list-type="invalid">
<title>Lista Númerica</title>
<list-item>
<p>Nullam gravida tellus eget condimentum egestas.</p>
</list-item>
<list-item>
<list list-type="invalid">
<list-item>
<p>Curabitur luctus lorem ac feugiat pretium.</p>
</list-item>
</list>
</list-item>
<list-item>
<p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
</list-item>
</list>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_disallowed_sub_list_type(self):
sample = u"""<article>
<body>
<sec>
<p>
<list list-type="order">
<title>Lista Númerica</title>
<list-item>
<p>Nullam gravida tellus eget condimentum egestas.</p>
</list-item>
<list-item>
<list list-type="invalid">
<list-item>
<p>Curabitur luctus lorem ac feugiat pretium.</p>
</list-item>
</list>
</list-item>
<list-item>
<p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
</list-item>
</list>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_list_type(self):
sample = u"""<article>
<body>
<sec>
<p>
<list>
<title>Lista Númerica</title>
<list-item>
<p>Nullam gravida tellus eget condimentum egestas.</p>
</list-item>
<list-item>
<list>
<list-item>
<p>Curabitur luctus lorem ac feugiat pretium.</p>
</list-item>
</list>
</list-item>
<list-item>
<p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
</list-item>
</list>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_sub_list_type(self):
sample = u"""<article>
<body>
<sec>
<p>
<list list-type="order">
<title>Lista Númerica</title>
<list-item>
<p>Nullam gravida tellus eget condimentum egestas.</p>
</list-item>
<list-item>
<list>
<list-item>
<p>Curabitur luctus lorem ac feugiat pretium.</p>
</list-item>
</list>
</list-item>
<list-item>
<p>Donec pulvinar odio ut enim lobortis, eu dignissim elit accumsan.</p>
</list-item>
</list>
</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class MediaTests(PhaseBasedTestCase):
"""Tests for article/body//p/media elements.
"""
sch_phase = 'phase.media_attributes'
def test_missing_mimetype(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<p><media mime-subtype="mp4" xlink:href="1234-5678-rctb-45-05-0110-m01.mp4"/></p>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_mime_subtype(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<p><media mimetype="video" xlink:href="1234-5678-rctb-45-05-0110-m01.mp4"/></p>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_href(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<p><media mimetype="video" mime-subtype="mp4"/></p>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_all_present(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<p><media mimetype="video" mime-subtype="mp4" xlink:href="1234-5678-rctb-45-05-0110-m01.mp4"/></p>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
class ExtLinkTests(PhaseBasedTestCase):
"""Tests for ext-link elements.
"""
sch_phase = 'phase.ext-link'
def test_complete(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<sec>
<p>Neque porro quisquam est <ext-link ext-link-type="uri" xlink:href="http://www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_allowed_extlinktype(self):
for link_type in ['uri', 'clinical-trial' ]:
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<sec>
<p>Neque porro quisquam est <ext-link ext-link-type="%s" xlink:href="http://www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
</sec>
</body>
</article>
""" % link_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_extlinktype(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<sec>
<p>Neque porro quisquam est <ext-link ext-link-type="invalid" xlink:href="http://www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_extlinktype(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<sec>
<p>Neque porro quisquam est <ext-link xlink:href="http://www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_xlinkhref(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<sec>
<p>Neque porro quisquam est <ext-link ext-link-type="uri">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_uri_without_scheme(self):
sample = u"""<article xmlns:xlink="http://www.w3.org/1999/xlink">
<body>
<sec>
<p>Neque porro quisquam est <ext-link ext-link-type="uri" xlink:href="www.scielo.org">www.scielo.org</ext-link> qui dolorem ipsum quia</p>
</sec>
</body>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class SubArticleAttributesTests(PhaseBasedTestCase):
"""Tests for sub-article element.
"""
sch_phase = 'phase.sub-article-attrs'
def test_allowed_article_types(self):
for art_type in ['abstract', 'letter', 'reply', 'translation']:
sample = u"""<article article-type="research-article" xml:lang="en" dtd-version="1.0" specific-use="sps-1.3">
<sub-article article-type="%s" xml:lang="pt" id="sa1"></sub-article>
</article>
""" % art_type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_article_type(self):
sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.3">
<sub-article article-type="invalid" xml:lang="pt" id="trans_pt"></sub-article>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_article_type(self):
sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.3">
<sub-article xml:lang="pt" id="trans_pt"></sub-article>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_xmllang(self):
sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.3">
<sub-article article-type="translation" id="trans_pt"></sub-article>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_id(self):
sample = u"""<article article-type="research-article" dtd-version="1.0" specific-use="sps-1.3">
<sub-article article-type="translation" xml:lang="pt"></sub-article>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ResponseAttributesTests(PhaseBasedTestCase):
"""Tests for response element.
"""
sch_phase = 'phase.response-attrs'
def test_allowed_response_types(self):
for type in ['addendum', 'discussion', 'reply']:
sample = u"""<article>
<response response-type="%s" xml:lang="pt" id="r1"></response>
</article>
""" % type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_response_type(self):
sample = u"""<article>
<response response-type="invalid" xml:lang="pt" id="r1"></response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_response_type(self):
sample = u"""<article>
<response xml:lang="pt" id="r1"></response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_xmllang(self):
sample = u"""<article>
<response response-type="invalid" id="r1"></response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_id(self):
sample = u"""<article>
<response response-type="invalid" xml:lang="pt"></response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class ResponseReplyAttributeTests(PhaseBasedTestCase):
"""Tests for response[@response-type='reply'] elements.
"""
sch_phase = 'phase.response-reply-type'
def test_reply_type_demands_an_article_type(self):
""" the article-type of value `article-commentary` is required
"""
sample = u"""<article article-type="article-commentary">
<response response-type="reply" xml:lang="pt" id="r1">
<front-stub>
<related-article related-article-type="commentary-article" id="ra1" vol="109" page="87-92"/>
</front-stub>
</response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_reply_type_invalid_article_type(self):
""" anything different of `article-commentary` is invalid
"""
sample = u"""<article article-type="research-article">
<response response-type="reply" xml:lang="pt" id="r1">
<front-stub>
<related-article related-article-type="commentary-article" id="ra1" vol="109" page="87-92"/>
</front-stub>
</response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_reply_type_missing_related_article(self):
""" the article-type of value `article-commentary` is required
"""
sample = u"""<article article-type="article-commentary">
<response response-type="reply" xml:lang="pt" id="r1">
<front-stub>
</front-stub>
</response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_related_article_missing_vol(self):
sample = u"""<article article-type="article-commentary">
<response response-type="reply" xml:lang="pt" id="r1">
<front-stub>
<related-article related-article-type="commentary-article" id="ra1" page="87-92"/>
</front-stub>
</response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_related_article_missing_page(self):
sample = u"""<article article-type="article-commentary">
<response response-type="reply" xml:lang="pt" id="r1">
<front-stub>
<related-article related-article-type="commentary-article" id="ra1" vol="109" elocation-id="1q2w"/>
</front-stub>
</response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_related_article_missing_elocationid(self):
sample = u"""<article article-type="article-commentary">
<response response-type="reply" xml:lang="pt" id="r1">
<front-stub>
<related-article related-article-type="commentary-article" id="ra1" vol="109" page="87-92"/>
</front-stub>
</response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_related_article_missing_page_and_elocationid(self):
sample = u"""<article article-type="article-commentary">
<response response-type="reply" xml:lang="pt" id="r1">
<front-stub>
<related-article related-article-type="commentary-article" id="ra1" vol="109"/>
</front-stub>
</response>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class RelatedArticleTypesTests(PhaseBasedTestCase):
"""Tests for related-article element.
"""
sch_phase = 'phase.related-article-attrs'
def test_allowed_related_article_types(self):
for type in ['corrected-article', 'press-release', 'commentary-article', 'article-reference']:
sample = u"""<article>
<front>
<article-meta>
<related-article related-article-type="%s" id="01"/>
</article-meta>
</front>
</article>
""" % type
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_disallowed_related_article_type(self):
sample = u"""<article>
<front>
<article-meta>
<related-article related-article-type="invalid" id="01"/>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_id(self):
sample = u"""<article>
<front>
<article-meta>
<related-article related-article-type="corrected-article"/>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_related_article_type(self):
sample = u"""<article>
<front>
<article-meta>
<related-article id="01"/>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class CorrectionTests(PhaseBasedTestCase):
"""Tests for article[@article-type="correction"] element.
"""
sch_phase = 'phase.correction'
def test_expected_elements(self):
sample = u"""<article article-type="correction">
<front>
<article-meta>
<related-article related-article-type="corrected-article" id="01"/>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_missing_related_article(self):
""" must have a related-article[@related-article-type='corrected-article']
element.
"""
sample = u"""<article article-type="correction">
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_article_type_must_be_correction(self):
sample = u"""<article article-type="research-article">
<front>
<article-meta>
<related-article related-article-type="corrected-article" id="01"/>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class InBriefTests(PhaseBasedTestCase):
"""Tests for article[@article-type="in-brief"] element.
"""
sch_phase = 'phase.in-brief'
def test_expected_elements(self):
sample = u"""<article article-type="in-brief">
<front>
<article-meta>
<related-article related-article-type="article-reference" id="01"/>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_missing_related_article(self):
""" must have a related-article[@related-article-type='in-brief']
element.
"""
sample = u"""<article article-type="in-brief">
<front>
<article-meta>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_article_type_must_be_in_brief(self):
sample = u"""<article article-type="research-article">
<front>
<article-meta>
<related-article related-article-type="article-reference" id="01"/>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class FundingGroupTests(PhaseBasedTestCase):
"""Tests for article/front/article-meta/funding-group elements.
"""
sch_phase = 'phase.funding-group'
def test_funding_statement_when_fn_is_present_missing_award_group(self):
sample = u"""<article>
<front>
<article-meta>
<funding-group>
<funding-statement>This study was supported by FAPEST #12345</funding-statement>
</funding-group>
</article-meta>
</front>
<back>
<fn-group>
<fn id="fn01" fn-type="financial-disclosure">
<p>This study was supported by FAPEST #12345</p>
</fn>
</fn-group>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_funding_statement_when_fn_is_present(self):
sample = u"""<article>
<front>
<article-meta>
<funding-group>
<award-group>
<funding-source>FAPEST</funding-source>
<award-id>12345</award-id>
</award-group>
<funding-statement>This study was supported by FAPEST #12345</funding-statement>
</funding-group>
</article-meta>
</front>
<back>
<fn-group>
<fn id="fn01" fn-type="financial-disclosure">
<p>This study was supported by FAPEST #12345</p>
</fn>
</fn-group>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_missing_funding_statement_when_fn_is_present(self):
sample = u"""<article>
<front>
<article-meta>
<funding-group>
<award-group>
<funding-source>FAPEST</funding-source>
<award-id>12345</award-id>
</award-group>
</funding-group>
</article-meta>
</front>
<back>
<fn-group>
<fn id="fn01" fn-type="financial-disclosure">
<p>This study was supported by FAPEST #12345</p>
</fn>
</fn-group>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class AffCountryTests(PhaseBasedTestCase):
""" //aff/country/@country is required.
See: https://github.com/scieloorg/packtools/issues/44
"""
sch_phase = 'phase.aff_country'
def test_attribute_is_present(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<country country="BR">Brasil</country>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_attribute_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<country>Brasil</country>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_attribute_value_is_not_validated(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<country country="XZ">Brasil</country>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_country_cannot_be_empty(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<country country="XZ"></country>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_country_cannot_be_empty_closed_element(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<country country="XZ"/>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class RefTests(PhaseBasedTestCase):
"""Tests for article/back/ref-list/ref element.
"""
sch_phase = 'phase.ref'
def test_element_and_mixed_citation_elements(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aires</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Paz</surname>
<given-names>AA</given-names>
</name>
<name>
<surname>Perosa</surname>
<given-names>CT</given-names>
</name>
</person-group>
<article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
<source>Rev Gaucha Enferm</source>
<year>2009</year>
<volume>30</volume>
<issue>3</issue>
<fpage>192</fpage>
<lpage>199</lpage>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_missing_element_citation(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<mixed-citation>Aires M, Paz AA, Perosa CT. Situação de saúde e grau de dependência de pessoas idosas institucionalizadas. <italic>Rev Gaucha Enferm.</italic> 2009;30(3):192-9.</mixed-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_missing_mixed_citation(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aires</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Paz</surname>
<given-names>AA</given-names>
</name>
<name>
<surname>Perosa</surname>
<given-names>CT</given-names>
</name>
</person-group>
<article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
<source>Rev Gaucha Enferm</source>
<year>2009</year>
<volume>30</volume>
<issue>3</issue>
<fpage>192</fpage>
<lpage>199</lpage>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
def test_mixed_citation_cannot_be_empty(self):
sample = u"""<article>
<back>
<ref-list>
<ref>
<mixed-citation></mixed-citation>
<element-citation publication-type="journal">
<person-group person-group-type="author">
<name>
<surname>Aires</surname>
<given-names>M</given-names>
</name>
<name>
<surname>Paz</surname>
<given-names>AA</given-names>
</name>
<name>
<surname>Perosa</surname>
<given-names>CT</given-names>
</name>
</person-group>
<article-title>Situação de saúde e grau de dependência de pessoas idosas institucionalizadas</article-title>
<source>Rev Gaucha Enferm</source>
<year>2009</year>
<volume>30</volume>
<issue>3</issue>
<fpage>192</fpage>
<lpage>199</lpage>
</element-citation>
</ref>
</ref-list>
</back>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
class AffTests(PhaseBasedTestCase):
""" /article//aff is required.
"""
sch_phase = 'phase.aff'
def test_country_is_present(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
<country country="BR">Brasil</country>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertTrue(self._run_validation(sample))
def test_country_is_absent(self):
sample = u"""<article>
<front>
<article-meta>
<aff>
<institution content-type="original">
Grupo de ...
</institution>
</aff>
</article-meta>
</front>
</article>
"""
sample = io.BytesIO(sample.encode('utf-8'))
self.assertFalse(self._run_validation(sample))
| gustavofonseca/packtools | tests/test_schematron_1_3.py | Python | bsd-2-clause | 181,171 |
# Copyright (c) 2013-2016 Hewlett Packard Enterprise Development LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import os.path
import sys
from requestbuilder import Arg
from requestbuilder.exceptions import ArgumentError
from requestbuilder.mixins import FileTransferProgressBarMixin
import six
from euca2ools.commands.s3 import S3Request
import euca2ools.bundle.pipes
class GetObject(S3Request, FileTransferProgressBarMixin):
DESCRIPTION = 'Retrieve objects from the server'
ARGS = [Arg('source', metavar='BUCKET/KEY', route_to=None,
help='the object to download (required)'),
Arg('-o', dest='dest', metavar='PATH', route_to=None,
default='.', help='''where to download to. If this names a
directory the object will be written to a file inside of that
                directory. If this is "-" the object will be written to
stdout. Otherwise it will be written to a file with the name
given. (default: current directory)''')]
def configure(self):
S3Request.configure(self)
bucket, _, key = self.args['source'].partition('/')
if not bucket:
raise ArgumentError('source must contain a bucket name')
if not key:
raise ArgumentError('source must contain a key name')
if isinstance(self.args.get('dest'), six.string_types):
# If it is not a string we assume it is a file-like object
if self.args['dest'] == '-':
self.args['dest'] = sys.stdout
elif os.path.isdir(self.args['dest']):
basename = os.path.basename(key)
if not basename:
raise ArgumentError("specify a complete file path with -o "
"to download objects that end in '/'")
dest_path = os.path.join(self.args['dest'], basename)
self.args['dest'] = open(dest_path, 'w')
else:
self.args['dest'] = open(self.args['dest'], 'w')
def preprocess(self):
self.path = self.args['source']
def main(self):
# Note that this method does not close self.args['dest']
self.preprocess()
bytes_written = 0
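        # Two digests are computed while streaming: MD5 feeds the ETag-based
        # integrity check below, and SHA1 is included in the result dict that
        # this command returns.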
md5_digest = hashlib.md5()
sha_digest = hashlib.sha1()
response = self.send()
content_length = response.headers.get('Content-Length')
if content_length:
pbar = self.get_progressbar(label=self.args['source'],
maxval=int(content_length))
else:
pbar = self.get_progressbar(label=self.args['source'])
pbar.start()
for chunk in response.iter_content(chunk_size=euca2ools.BUFSIZE):
self.args['dest'].write(chunk)
bytes_written += len(chunk)
md5_digest.update(chunk)
sha_digest.update(chunk)
if pbar is not None:
pbar.update(bytes_written)
self.args['dest'].flush()
pbar.finish()
# Integrity checks
if content_length and bytes_written != int(content_length):
self.log.error('rejecting download due to Content-Length size '
'mismatch (expected: %i, actual: %i)',
                           int(content_length), bytes_written)
raise RuntimeError('downloaded file appears to be corrupt '
'(expected size: {0}, actual: {1})'
.format(content_length, bytes_written))
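        # Only treat the ETag as a checksum when it looks like a plain MD5 hex
        # digest (32 hex characters); other ETag formats (e.g. those produced
        # by multipart uploads) are not comparable and are skipped.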
etag = response.headers.get('ETag', '').lower().strip('"')
if (len(etag) == 32 and
all(char in '0123456789abcdef' for char in etag)):
# It looks like an MD5 hash
if md5_digest.hexdigest() != etag:
self.log.error('rejecting download due to ETag MD5 mismatch '
'(expected: %s, actual: %s)',
etag, md5_digest.hexdigest())
raise RuntimeError('downloaded file appears to be corrupt '
'(expected MD5: {0}, actual: {1})'
.format(etag, md5_digest.hexdigest()))
return {self.args['source']: {'md5': md5_digest.hexdigest(),
'sha1': sha_digest.hexdigest(),
'size': bytes_written}}
| gholms/euca2ools | euca2ools/commands/s3/getobject.py | Python | bsd-2-clause | 5,714 |
import cPickle
import gzip
import time
import os
import sys
import cPickle as pickle
import gc
import numpy as np
from time import sleep
import auc
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
from theano.ifelse import ifelse
import theano.printing
from collections import OrderedDict
from logisticRegression import LogisticRegression
from layers import DropoutHiddenLayer, HiddenLayer2d, HiddenLayer, ConvolutionalHiddenSoftmax, ConvolutionalLayer
import warnings
warnings.filterwarnings('ignore')
L = 330
n_epochs = 20
Q = 14
NUM_TRAIN = 1200000  # use a multiple of 50'000
MINIREAD = 1
batch_size= 1000
############
############ uncomment the lines below for a quick sanity check; execution is roughly 15 times faster
############
# n_epochs = 4
# Q = 4
# NUM_TRAIN = 250000
# MINIREAD = 4
# batch_size= 1000
THREAD = 20
span = 1
NN = 1000
POOL = 10
Knormal = 1794 * 100 / MINIREAD
learning_rate0 = 0.2;
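# Rough meaning of the main constants, judging by how they are used below:
# L is the length (in frames) of the window cut out for each training example,
# NN is the number of channels/neurons recorded per frame, THREAD is the
# per-frame total-activity threshold used when selecting frames in read(), and
# batch_size / n_epochs drive the training loop.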
def ReLU(x):
y = T.maximum(0.0, x)
return (y)
def read(s, sn ,sp, Kile):
P=[]; lenn = []; nott = 0
_nps = []
_s = []
with open(path+sp) as ff:
for line in ff:
x,y = line.split(',')
P.append([float(x),float(y)])
print "opening"
with open(path+s) as f:
rlast = []; cnt = 0; arrayprev = []; Ti = []; ile = 0
for line in f:
if cnt % 17940 == 0:
print str(cnt/1794), "% ",
if cnt != 0:
pos = 0; r = []; rr2 = np.zeros(NN); rr = np.zeros(NN); rp = []
for x in line.split(','):
x_f = float(x)
rp.append(x_f)
for x in rp:
val2 = x - arrayprev[pos]
rr[pos] = val2 # to sum
pos+=1
nps = np.sum(rr)
_w = [_x for _x in rr if _x >= 0.2]
_wn = len(_w)
if nps < THREAD :
if nott > 0:
lenn.append(nott);
ile+=nott
nott = 0
else:
nott -=1
else:
if nott <= 0:
nott = 1
else:
nott += 1
pos+=1
if nott >= 1:
Ti.append(rr)
_nps.append(nps)
if nott==1:
_s.append(1)
else:
_s.append(1)
arrayprev = rp
else:
arrayprev = [float(x) for x in line.split(',')]
if cnt > Kile + 10:
break
cnt+=1
C = [[0]*len(rr)]*len(rr)
C = np.asarray(C)
print "\n\n selected frames number = ", ile, "\n\n"
if sn != None:
with open(path+sn) as ff:
for line in ff:
a,b,w = line.split(',')
a = int(a); b = int(b); w = int(w)
if w==1:
C[a-1][b-1] = 1;
print "trans..."
Tprim = np.empty((len(rr)+2, ile), np.float32) ##############
for j in range(len(rr)):
a = []
for i in range(ile):
Tprim[j][i] = Ti[i][j]
for i in range(ile):
Tprim[1000][i] = _nps[i]
for i in range(ile):
Tprim[1001][i] = _s[i]
gc.collect()
print "AVG SPLIT LEN: ", np.mean(lenn)
return Tprim, C, P
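# Illustrative sketch (not part of the original script): the core frame
# selection rule applied inside read() above -- a frame is treated as "active"
# when the total frame-to-frame increase in fluorescence reaches the THREAD
# threshold.  The helper below is hypothetical and never called.
def _example_select_active_frames(fluorescence_frames, threshold=THREAD):
    diffs = np.diff(fluorescence_frames, axis=0)  # per-neuron increase between consecutive frames
    activity = diffs.sum(axis=1)                  # summed increase per frame
    return diffs[activity >= threshold]           # keep only the active frames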
def learnAndPredict(Ti, C, TOList):
rng = np.random.RandomState(SEED)
learning_rate = learning_rate0
print np.mean(Ti[1000,:])
aminW = np.amin(Ti[:1000,:])
amaxW = np.amax(Ti[:1000,:])
Ti[:1000,:] = (Ti[:1000,:] - aminW) / (amaxW - aminW)
astdW = np.std(Ti[:1000,:])
ameanW = np.mean(Ti[:1000,:])
Ti[:1000,:] = (Ti[:1000,:] - ameanW) / astdW
aminacW = np.amin(Ti[1000,:])
amaxacW = np.amax(Ti[1000,:])
print aminW, amaxW, aminacW, amaxacW
Ti[1000,:] = (Ti[1000,:] - aminacW) / (amaxacW - aminacW)
astdacW = np.std(Ti[1000,:])
ameanacW = np.mean(Ti[1000,:])
Ti[1000,:] = (Ti[1000,:] - ameanacW) / astdacW
ile__ = len(TOList)
ileList = np.zeros(ile__)
for titer in range(len(TOList)):
print np.mean(TOList[titer][1000,:])
TOList[titer][:1000,:] = (TOList[titer][:1000,:] - aminW)/(amaxW - aminW)
TOList[titer][:1000,:] = (TOList[titer][:1000,:] - ameanW)/astdW
TOList[titer][1000,:] = (TOList[titer][1000,:] - aminacW)/(amaxacW - aminacW)
TOList[titer][1000,:] = (TOList[titer][1000,:] - ameanacW)/astdacW
_, ileList[titer] = TOList[titer].shape
_, ile = Ti.shape
N = NN
data = []; yyy = []; need = 1; BYL = {}; j= 0; dwa = 0; ONES = []; ZEROS = []
for i in range(NN):
for j in range(NN):
if i!= j:
if C[i][j]==1:
ONES.append((i,j))
else:
ZEROS.append((i,j))
Nones = len(ONES)
rng.shuffle(ONES)
Nzeros = len(ZEROS)
print Nones
print Nzeros
Needed = NUM_TRAIN/2
onesPerPair = Needed / Nones + 1
onesIter = 0
jj = 0
while jj < NUM_TRAIN:
if jj%300000 == 0:
print jj/300000,
need = 1 - need
if need == 1:
pairNo = onesIter % Nones
ppp = onesIter / Nones
s,t = ONES[pairNo]
shift = rng.randint(0, ile - L)
onesIter += 1
if need == 0:
zer = rng.randint(Nzeros)
s,t = ZEROS[zer]
del ZEROS[zer]
Nzeros -= 1
shift = rng.randint(0, ile - L)
x = np.hstack(( Ti[s][shift:shift+L], Ti[t][shift:shift+L], Ti[1000][shift:shift+L]))
y = C[s][t]
data.append(x); yyy.append(y)
jj+=1
data = np.array(data, dtype=theano.config.floatX)
is_train = np.array( ([0]*96 + [1,1,2,2]) * (NUM_TRAIN / 100))
yyy = np.array(yyy)
train_set_x0, train_set_y0 = np.array(data[is_train==0]), yyy[is_train==0]
test_set_x, test_set_y = np.array(data[is_train==1]), yyy[is_train==1]
valid_set_x, valid_set_y = np.array(data[is_train==2]), yyy[is_train==2]
n_train_batches = len(train_set_y0) / batch_size
n_valid_batches = len(valid_set_y) / batch_size
n_test_batches = len(test_set_y) / batch_size
epoch = T.scalar()
index = T.lscalar()
x = T.matrix('x')
inone2 = T.matrix('inone2')
y = T.ivector('y')
print '... building the model'
#-------- my layers -------------------
#---------------------
layer0_input = x.reshape((batch_size, 1, 3, L))
Cx = 5
layer0 = ConvolutionalLayer(rng, input=layer0_input,
image_shape=(batch_size, 1, 3, L),
filter_shape=(nkerns[0], 1, 2, Cx), poolsize=(1, 1), fac = 0)
ONE = (3 - 2 + 1) / 1
L2 = (L - Cx + 1) / 1
#---------------------
Cx2 = 5
layer1 = ConvolutionalLayer(rng, input=layer0.output,
image_shape=(batch_size, nkerns[0], ONE, L2),
filter_shape=(nkerns[1], nkerns[0], 2, Cx2), poolsize=(1, 1), activation=ReLU, fac = 0)
ONE = (ONE - 2 + 1) /1
L3 = (L2 - Cx2 + 1) /1
#---------------------
Cx3 = 1
layer1b = ConvolutionalLayer(rng, input=layer1.output,
image_shape=(batch_size, nkerns[1], ONE, L3),
filter_shape=(nkerns[2], nkerns[1], 1, Cx3), poolsize=(1, POOL), activation=ReLU, fac = 0)
ONE = (ONE - 1 + 1) /1
L4 = (L3 - Cx3 + 1) /POOL
REGx = 100
#---------------------
layer2_input = layer1b.output.flatten(2)
print layer2_input.shape
use_b = False
layer2 = HiddenLayer(rng, input=layer2_input, n_in=nkerns[2]*L4 , n_out=REGx, activation=T.tanh,
use_bias = use_b)
layer3 = LogisticRegression(input=layer2.output, n_in=REGx, n_out=2)
cost = layer3.negative_log_likelihood(y)
out_x2 = theano.shared(np.asarray(np.zeros((N,L)), dtype=theano.config.floatX))
inone2 = theano.shared(np.asarray(np.zeros((1,L)), dtype=theano.config.floatX))
inone3 = theano.shared(np.asarray(np.zeros((1,L)), dtype=theano.config.floatX))
inone4 = theano.shared(np.asarray(np.zeros((1,L)), dtype=theano.config.floatX))
test_set_x = theano.shared(np.asarray(test_set_x, dtype=theano.config.floatX))
train_set_x = theano.shared(np.asarray(train_set_x0, dtype=theano.config.floatX))
train_set_y = T.cast(theano.shared(np.asarray(train_set_y0, dtype=theano.config.floatX)), 'int32')
test_set_y = T.cast(theano.shared(np.asarray(test_set_y, dtype=theano.config.floatX)), 'int32')
valid_set_y = T.cast(theano.shared(np.asarray(valid_set_y, dtype=theano.config.floatX)), 'int32')
valid_set_x = theano.shared(np.asarray(valid_set_x, dtype=theano.config.floatX))
test_model = theano.function([index], layer3.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]})
validate_model = theano.function([index], layer3.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]})
mom_start = 0.5; mom_end = 0.98; mom_epoch_interval = n_epochs * 1.0
#### @@@@@@@@@@@
class_params0 = [layer3, layer2, layer1, layer1b, layer0]
class_params = [ param for layer in class_params0 for param in layer.params ]
gparams = []
for param in class_params:
gparam = T.grad(cost, param)
gparams.append(gparam)
gparams_mom = []
for param in class_params:
gparam_mom = theano.shared(np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX))
gparams_mom.append(gparam_mom)
mom = ifelse(epoch < mom_epoch_interval,
mom_start*(1.0 - epoch/mom_epoch_interval) + mom_end*(epoch/mom_epoch_interval),
mom_end)
updates = OrderedDict()
for gparam_mom, gparam in zip(gparams_mom, gparams):
updates[gparam_mom] = mom * gparam_mom - (1. - mom) * learning_rate * gparam
for param, gparam_mom in zip(class_params, gparams_mom):
stepped_param = param + updates[gparam_mom]
squared_filter_length_limit = 15.0
if param.get_value(borrow=True).ndim == 2:
col_norms = T.sqrt(T.sum(T.sqr(stepped_param), axis=0))
desired_norms = T.clip(col_norms, 0, T.sqrt(squared_filter_length_limit))
scale = desired_norms / (1e-7 + col_norms)
updates[param] = stepped_param * scale
else:
updates[param] = stepped_param
output = cost
train_model = theano.function(inputs=[epoch, index], outputs=output,
updates=updates,
givens={
x: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size:(index + 1) * batch_size]})
keep = theano.function([index], layer3.errorsFull(y),
givens={
x: train_set_x[index * batch_size:(index + 1) * batch_size],
y: train_set_y[index * batch_size:(index + 1) * batch_size]}, on_unused_input='warn')
timer = time.clock()
print "finished reading", (timer - start_time0) /60. , "minutes "
# TRAIN MODEL #
print '... training'
validation_frequency = n_train_batches; best_params = None; best_validation_loss = np.inf
best_iter = 0; test_score = 0.; epochc = 0;
while (epochc < n_epochs):
epochc = epochc + 1
learning_rate = learning_rate0 * (1.2 - ((1.0 * epochc)/n_epochs))
for minibatch_index in xrange(n_train_batches):
iter = (epochc - 1) * n_train_batches + minibatch_index
cost_ij = train_model(epochc, minibatch_index)
if (iter + 1) % validation_frequency == 0:
validation_losses = [validate_model(i) for i in xrange(n_valid_batches)]
this_validation_loss = np.mean(validation_losses)
print(' %i) err %.2f ' % (epochc, this_validation_loss/10)), L, nkerns, REGx, "|", Cx, Cx2, Cx3, batch_size
if this_validation_loss < best_validation_loss or epochc % 30 == 0:
best_validation_loss = this_validation_loss
best_iter = iter
test_losses = [test_model(i) for i in xrange(n_test_batches)]
test_score = np.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of best '
'model %f %%') % (epochc, minibatch_index + 1, n_train_batches, test_score/10))
############
timel = time.clock()
print "finished learning", (timel - timer) /60. , "minutes "
ppm = theano.function([index], layer3.pred_proba_mine(),
givens={
x: T.horizontal_stack(T.tile(inone2, (batch_size ,1)),
out_x2[index * batch_size: (index + 1) * batch_size], T.tile(inone3, (batch_size ,1))),
y: train_set_y[0 * (batch_size): (0 + 1) * (batch_size)]
}, on_unused_input='warn')
NONZERO = (N*N-N)
gc.collect()
RESList = [np.zeros((N,N)) for it in range(ile__)]
for __net in range(ile__):
TO = TOList[__net]
ileO = ileList[__net]
RES = RESList[__net]
shift = 0.1
DELTAshift = (ileO-L) / (Q-1)
print "DELTAshift:", DELTAshift
for q in range (Q):
dataO = []; print (q+1),"/", Q , " ",
out_x2.set_value(np.asarray(np.array(TO[:,shift:shift+L]), dtype=theano.config.floatX))
PARTIAL = np.zeros((N,N))
inone3.set_value(np.asarray(np.array(TO[1000][shift:shift+L]).reshape(1,L), dtype=theano.config.floatX))
for i in range(N):
inone2.set_value(np.asarray(np.array(TO[i][shift:shift+L]).reshape(1,L), dtype=theano.config.floatX))
p = [ppm(ii) for ii in xrange( N / batch_size)]
for pos in range(N):
if pos != i:
PARTIAL[i][pos] += p[pos / batch_size][pos % batch_size][1]
for i in range(N):
for j in range(N):
RES[i][j] += PARTIAL[i][j]
shift += DELTAshift
print "Finished", __net
RESList[__net] = RES/np.max(RES)
gc.collect()
end_time = time.clock()
print "finished predicting", (end_time - timel) /60. , "minutes ", str(nkerns), "using SEED = ", SEED
print('The code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time0) / 60.))
return RESList
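# Illustrative sketch (not part of the original script): the momentum update
# with a column-norm constraint that learnAndPredict() assembles symbolically
# with Theano above, written out in plain NumPy for a single weight matrix.
# All names are hypothetical and the helper is never called.
def _example_momentum_step(param, grad, velocity, mom=0.9,
                           learning_rate=learning_rate0,
                           squared_norm_limit=15.0):
    velocity = mom * velocity - (1. - mom) * learning_rate * grad
    stepped = param + velocity
    col_norms = np.sqrt(np.sum(stepped ** 2, axis=0))
    desired = np.clip(col_norms, 0, np.sqrt(squared_norm_limit))
    scale = desired / (1e-7 + col_norms)   # shrink only columns that exceed the limit
    return stepped * scale, velocity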
if __name__ == '__main__':
MY = 9 #(GPU)
VER = 1
if len(sys.argv)>1: # select random sequence seed
VER = int(sys.argv[1])
nkerns = [18, 40, 15]
if VER == 1:
SEED = 8001
if VER == 2:
SEED = 80001
if VER == 3:
SEED = 888
if VER == 4:
SEED = 8881
if VER == 5:
SEED = 8001
if VER == 6:
SEED = 100
if VER == 7:
SEED = 18000
if VER == 8:
SEED = 80801
start_time0 = time.clock()
print THREAD
if MY == 1 or MY == 5 or MY == 2 or MY==9:
path = "/firstly/set/path/here"
name = "normal-1"
s = "/"+name+"/fluorescence_"+name+".txt"
sn = "/"+name+"/network_"+name+".txt"
sp = "/"+name+"/networkPositions_"+name+".txt"
print name
TN1, CN1 , PN1 = read(s,sn,sp, Knormal)
gc.collect()
name0 = name
name = "normal-3"
s = "/"+name+"/fluorescence_"+name+".txt"
sn = "/"+name+"/network_"+name+".txt"
sp = "/"+name+"/networkPositions_"+name+".txt"
print name
TN2, CN2, PN2 = read(s,sn,sp, Knormal)
gc.collect()
name = "normal-2"
s = "/"+name+"/fluorescence_"+name+".txt"
sn = "/"+name+"/network_"+name+".txt"
sp = "/"+name+"/networkPositions_"+name+".txt"
print name
TN3, CN3 , PN3 = read(s,sn,sp, Knormal)
gc.collect()
if MY == 9:
print "reading valid..."
s = "/valid/fluorescence_valid.txt"
sn = None
sp = "/valid/networkPositions_valid.txt"
TV, _, PV = read(s,sn,sp, Knormal)
print "reading test..."
s = "/test/fluorescence_test.txt"
sn = None
sp = "/test/networkPositions_test.txt"
TT, _, PT = read(s,sn,sp, Knormal)
[RN2, RT, RV, RN3] = learnAndPredict(TN1, CN1, [TN2, TT, TV, TN3])
suff = np.random.randint(10000)
f = open("./res_ver"+str(VER)+".csv", 'w')
f.write("NET_neuronI_neuronJ,Strength\n")
for i in range (1000):
for j in range (1000):
f.write("valid_" +str(i+1)+"_"+str(j+1)+","+str(RV[i][j])+"\n")
for i in range (1000):
for j in range (1000):
f.write("test_" +str(i+1)+"_"+str(j+1)+","+str(RT[i][j])+"\n")
f.close()
print "Wrote solution of VER ==", str(VER)
RN2_ = RN2.flatten().tolist()
a = auc.auc(CN2.flatten().tolist(),RN2_)
RN3_ = RN3.flatten().tolist()
a2 = auc.auc(CN3.flatten().tolist(),RN3_)
print ("RES: %.2f learning (%.2f, %.2f)" % ((a+a2)*50, a*100, a2*100 ))
| lr292358/connectomics | run.py | Python | bsd-2-clause | 17,790 |
#!/usr/bin/python
#
# Copyright (C) 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for LUBackup*"""
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import query
from testsupport import *
import testutils
class TestLUBackupPrepare(CmdlibTestCase):
@patchUtils("instance_utils")
def testPrepareLocalExport(self, utils):
utils.ReadOneLineFile.return_value = "cluster_secret"
inst = self.cfg.AddNewInstance()
op = opcodes.OpBackupPrepare(instance_name=inst.name,
mode=constants.EXPORT_MODE_LOCAL)
self.ExecOpCode(op)
@patchUtils("instance_utils")
def testPrepareRemoteExport(self, utils):
utils.ReadOneLineFile.return_value = "cluster_secret"
inst = self.cfg.AddNewInstance()
self.rpc.call_x509_cert_create.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(inst.primary_node,
("key_name",
testutils.ReadTestData("cert1.pem")))
op = opcodes.OpBackupPrepare(instance_name=inst.name,
mode=constants.EXPORT_MODE_REMOTE)
self.ExecOpCode(op)
class TestLUBackupExportBase(CmdlibTestCase):
def setUp(self):
super(TestLUBackupExportBase, self).setUp()
self.rpc.call_instance_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, True)
self.rpc.call_blockdev_assemble.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, ("/dev/mock_path",
"/dev/mock_link_name",
None))
self.rpc.call_blockdev_shutdown.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
self.rpc.call_blockdev_snapshot.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, ("mock_vg", "mock_id"))
self.rpc.call_blockdev_remove.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
self.rpc.call_export_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, "export_daemon")
def ImpExpStatus(node_uuid, name):
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid,
[objects.ImportExportStatus(
exit_status=0
)])
self.rpc.call_impexp_status.side_effect = ImpExpStatus
def ImpExpCleanup(node_uuid, name):
return self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(node_uuid)
self.rpc.call_impexp_cleanup.side_effect = ImpExpCleanup
self.rpc.call_finalize_export.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.master, None)
def testRemoveRunningInstanceWithoutShutdown(self):
inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
op = opcodes.OpBackupExport(instance_name=inst.name,
target_node=self.master.name,
shutdown=False,
remove_instance=True)
self.ExecOpCodeExpectOpPrereqError(
op, "Can not remove instance without shutting it down before")
def testUnsupportedDiskTemplate(self):
inst = self.cfg.AddNewInstance(disk_template=constants.DT_FILE)
op = opcodes.OpBackupExport(instance_name=inst.name,
target_node=self.master.name)
self.ExecOpCodeExpectOpPrereqError(
op, "Export not supported for instances with file-based disks")
class TestLUBackupExportLocalExport(TestLUBackupExportBase):
def setUp(self):
super(TestLUBackupExportLocalExport, self).setUp()
self.inst = self.cfg.AddNewInstance()
self.target_node = self.cfg.AddNewNode()
self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_LOCAL,
instance_name=self.inst.name,
target_node=self.target_node.name)
self.rpc.call_import_start.return_value = \
self.RpcResultsBuilder() \
.CreateSuccessfulNodeResult(self.target_node, "import_daemon")
def testExportWithShutdown(self):
inst = self.cfg.AddNewInstance(admin_state=constants.ADMINST_UP)
op = self.CopyOpCode(self.op, instance_name=inst.name, shutdown=True)
self.ExecOpCode(op)
def testExportDeactivatedDisks(self):
self.ExecOpCode(self.op)
def testExportRemoveInstance(self):
op = self.CopyOpCode(self.op, remove_instance=True)
self.ExecOpCode(op)
def testValidCompressionTool(self):
op = self.CopyOpCode(self.op, compress="lzop")
self.cfg.SetCompressionTools(["gzip", "lzop"])
self.ExecOpCode(op)
def testInvalidCompressionTool(self):
op = self.CopyOpCode(self.op, compress="invalid")
self.cfg.SetCompressionTools(["gzip", "lzop"])
self.ExecOpCodeExpectOpPrereqError(op, "Compression tool not allowed")
class TestLUBackupExportRemoteExport(TestLUBackupExportBase):
def setUp(self):
super(TestLUBackupExportRemoteExport, self).setUp()
self.inst = self.cfg.AddNewInstance()
self.op = opcodes.OpBackupExport(mode=constants.EXPORT_MODE_REMOTE,
instance_name=self.inst.name,
target_node=[],
x509_key_name=["mock_key_name"],
destination_x509_ca="mock_dest_ca")
def testRemoteExportWithoutX509KeyName(self):
op = self.CopyOpCode(self.op, x509_key_name=self.REMOVE)
self.ExecOpCodeExpectOpPrereqError(op,
"Missing X509 key name for encryption")
def testRemoteExportWithoutX509DestCa(self):
op = self.CopyOpCode(self.op, destination_x509_ca=self.REMOVE)
self.ExecOpCodeExpectOpPrereqError(op,
"Missing destination X509 CA")
if __name__ == "__main__":
testutils.GanetiTestProgram()
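# Illustrative note (assumption, not from the original file): GanetiTestProgram
# is expected to wrap the standard unittest runner, so a single case can
# usually be selected from the command line, e.g.
#
#   python backup_unittest.py TestLUBackupExportLocalExport.testExportRemoveInstance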
| apyrgio/ganeti | test/py/cmdlib/backup_unittest.py | Python | bsd-2-clause | 7,522 |
from __future__ import absolute_import, print_function
import logging
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.pipeline import Pipeline
from sentry.models import Identity, IdentityStatus, IdentityProvider
from . import default_manager
IDENTITY_LINKED = _("Your {identity_provider} account has been associated with your Sentry account")
logger = logging.getLogger('sentry.identity')
class IdentityProviderPipeline(Pipeline):
logger = logger
pipeline_name = 'identity_provider'
provider_manager = default_manager
provider_model_cls = IdentityProvider
def redirect_url(self):
associate_url = reverse('sentry-extension-setup', kwargs={
# TODO(adhiraj): Remove provider_id from the callback URL, it's unused.
'provider_id': 'default',
})
# Use configured redirect_url if specified for the pipeline if available
return self.config.get('redirect_url', associate_url)
def finish_pipeline(self):
identity = self.provider.build_identity(self.state.data)
defaults = {
'status': IdentityStatus.VALID,
'scopes': identity.get('scopes', []),
'data': identity.get('data', {}),
'date_verified': timezone.now(),
}
identity, created = Identity.objects.get_or_create(
idp=self.provider_model,
user=self.request.user,
external_id=identity['id'],
defaults=defaults,
)
if not created:
identity.update(**defaults)
messages.add_message(self.request, messages.SUCCESS, IDENTITY_LINKED.format(
identity_provider=self.provider.name,
))
self.state.clear()
# TODO(epurkhiser): When we have more identities and have built out an
# identity management page that supports these new identities (not
# social-auth ones), redirect to the identities page.
return HttpResponseRedirect(reverse('sentry-account-settings'))
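# Illustrative sketch (not part of sentry): the "create once, refresh on every
# later link" pattern that finish_pipeline() uses above, shown for a generic
# Django-style model.  The argument names are hypothetical, and it assumes the
# model instance exposes an update() helper as sentry's model base class does.
def _example_link_identity(model_cls, idp, user, external_id, defaults):
    identity, created = model_cls.objects.get_or_create(
        idp=idp, user=user, external_id=external_id, defaults=defaults)
    if not created:
        # the identity already existed, so refresh its scopes/data/timestamp
        identity.update(**defaults)
    return identity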
| ifduyue/sentry | src/sentry/identity/pipeline.py | Python | bsd-3-clause | 2,192 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['quantity_input']
import inspect
from astropy.utils.decorators import wraps
from astropy.utils.misc import isiterable
from .core import Unit, UnitBase, UnitsError, add_enabled_equivalencies
from .physical import _unit_physical_mapping
def _get_allowed_units(targets):
"""
From a list of target units (either as strings or unit objects) and physical
types, return a list of Unit objects.
"""
allowed_units = []
for target in targets:
try: # unit passed in as a string
target_unit = Unit(target)
except ValueError:
try: # See if the function writer specified a physical type
physical_type_id = _unit_physical_mapping[target]
except KeyError: # Function argument target is invalid
raise ValueError("Invalid unit or physical type '{}'."
.format(target))
# get unit directly from physical type id
target_unit = Unit._from_physical_type_id(physical_type_id)
allowed_units.append(target_unit)
return allowed_units
def _validate_arg_value(param_name, func_name, arg, targets, equivalencies):
"""
Validates the object passed in to the wrapped function, ``arg``, with target
unit or physical type, ``target``.
"""
if len(targets) == 0:
return
allowed_units = _get_allowed_units(targets)
for allowed_unit in allowed_units:
try:
is_equivalent = arg.unit.is_equivalent(allowed_unit,
equivalencies=equivalencies)
if is_equivalent:
break
except AttributeError: # Either there is no .unit or no .is_equivalent
if hasattr(arg, "unit"):
error_msg = "a 'unit' attribute without an 'is_equivalent' method"
else:
error_msg = "no 'unit' attribute"
raise TypeError("Argument '{}' to function '{}' has {}. "
"You may want to pass in an astropy Quantity instead."
.format(param_name, func_name, error_msg))
else:
if len(targets) > 1:
raise UnitsError("Argument '{}' to function '{}' must be in units"
" convertible to one of: {}."
.format(param_name, func_name,
[str(targ) for targ in targets]))
else:
raise UnitsError("Argument '{}' to function '{}' must be in units"
" convertible to '{}'."
.format(param_name, func_name,
str(targets[0])))
class QuantityInput:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the decorator,
or by using function annotation syntax. Arguments to the decorator
take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator
or in the annotation.
        If the argument has no unit attribute, i.e. it is not a Quantity object, a
        `TypeError` will be raised unless the target was given as an annotation that
        is not a unit. This is to allow non-Quantity annotations to pass through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, **kwargs):
self.equivalencies = kwargs.pop('equivalencies', [])
self.decorator_kwargs = kwargs
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL):
continue
# Catch the (never triggered) case where bind relied on a default value.
if param.name not in bound_args.arguments and param.default is not param.empty:
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isiterable(targets):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [t for t in valid_targets if isinstance(t, (str, UnitBase))]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(param.name, wrapped_function.__name__,
arg, valid_targets, self.equivalencies)
# Call the original function with any equivalencies in force.
with add_enabled_equivalencies(self.equivalencies):
return_ = wrapped_function(*func_args, **func_kwargs)
if wrapped_signature.return_annotation not in (inspect.Signature.empty, None):
return return_.to(wrapped_signature.return_annotation)
else:
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
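# Illustrative sketch (not part of astropy): how a function decorated with
# quantity_input behaves for compatible and incompatible arguments.  The
# function below is hypothetical and nothing here executes at import time.
def _example_quantity_input_usage():
    import astropy.units as u

    @u.quantity_input(distance=u.m)
    def light_travel_time(distance) -> u.s:
        return distance / (299792458 * u.m / u.s)

    print(light_travel_time(1500 * u.km))   # converted and returned in seconds
    try:
        light_travel_time(10 * u.s)         # wrong physical type
    except u.UnitsError as err:
        print(err)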
| stargaser/astropy | astropy/units/decorators.py | Python | bsd-3-clause | 9,242 |
from __future__ import absolute_import
import unittest
import bokeh.resources as resources
from bokeh.resources import _get_cdn_urls
WRAPPER = """Bokeh.$(function() {
foo
});"""
WRAPPER_DEV = '''require(["jquery", "main"], function($, Bokeh) {
Bokeh.set_log_level("info");
Bokeh.$(function() {
foo
});
});'''
LOG_LEVELS = ['trace', 'debug', 'info', 'warn', 'error', 'fatal']
DEFAULT_LOG_JS_RAW = 'Bokeh.set_log_level("info");'
## Test JSResources
def test_js_resources_default_mode_is_inline():
r = resources.JSResources()
assert r.mode == "inline"
def test_js_resources_inline_has_no_css_resources():
r = resources.JSResources(mode="inline")
assert r.mode == "inline"
assert r.dev is False
assert len(r.js_raw) == 3
assert r.js_raw[-1] == DEFAULT_LOG_JS_RAW
assert hasattr(r, 'css_raw') is False
assert r.messages == []
## Test CSSResources
def test_css_resources_default_mode_is_inline():
r = resources.CSSResources()
assert r.mode == "inline"
def test_inline_css_resources():
r = resources.CSSResources(mode="inline")
assert r.mode == "inline"
assert r.dev is False
assert len(r.css_raw) == 2
assert hasattr(r, 'js_raw') is False
assert r.messages == []
class TestResources(unittest.TestCase):
def test_basic(self):
r = resources.Resources()
self.assertEqual(r.mode, "inline")
def test_log_level(self):
r = resources.Resources()
for level in LOG_LEVELS:
r.log_level = level
self.assertEqual(r.log_level, level)
if not r.dev:
self.assertEqual(r.js_raw[-1], 'Bokeh.set_log_level("%s");' % level)
self.assertRaises(ValueError, setattr, r, "log_level", "foo")
def test_module_attrs(self):
self.assertEqual(resources.CDN.mode, "cdn")
self.assertEqual(resources.INLINE.mode, "inline")
def test_inline(self):
r = resources.Resources(mode="inline")
self.assertEqual(r.mode, "inline")
self.assertEqual(r.dev, False)
self.assertEqual(len(r.js_raw), 3)
self.assertEqual(r.js_raw[-1], DEFAULT_LOG_JS_RAW)
self.assertEqual(len(r.css_raw), 2)
self.assertEqual(r.messages, [])
def test_get_cdn_urls(self):
dev_version = "0.0.1dev"
result = _get_cdn_urls(dev_version)
url = result['js_files'][0]
self.assertIn('bokeh/dev', url)
def test_cdn(self):
resources.__version__ = "1.0"
r = resources.Resources(mode="cdn", version="1.0")
self.assertEqual(r.mode, "cdn")
self.assertEqual(r.dev, False)
self.assertEqual(r.js_raw, [DEFAULT_LOG_JS_RAW])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
resources.__version__ = "1.0-1-abc"
r = resources.Resources(mode="cdn", version="1.0")
self.assertEqual(r.messages, [
{'text': "Requesting CDN BokehJS version '1.0' from Bokeh development version '1.0-1-abc'. This configuration is unsupported and may not work!",
'type': 'warn'}
])
def test_server(self):
r = resources.Resources(mode="server")
self.assertEqual(r.mode, "server")
self.assertEqual(r.dev, False)
self.assertEqual(r.js_raw, [DEFAULT_LOG_JS_RAW])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
r = resources.Resources(mode="server", root_url="http://foo/")
self.assertEqual(r.js_raw, [DEFAULT_LOG_JS_RAW])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_server_dev(self):
r = resources.Resources(mode="server-dev")
self.assertEqual(r.mode, "server")
self.assertEqual(r.dev, True)
self.assertEqual(len(r.js_raw), 1)
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
r = resources.Resources(mode="server-dev", root_url="http://foo/")
self.assertEqual(r.js_raw, [DEFAULT_LOG_JS_RAW])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_relative(self):
r = resources.Resources(mode="relative")
self.assertEqual(r.mode, "relative")
self.assertEqual(r.dev, False)
self.assertEqual(r.js_raw, [DEFAULT_LOG_JS_RAW])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_relative_dev(self):
r = resources.Resources(mode="relative-dev")
self.assertEqual(r.mode, "relative")
self.assertEqual(r.dev, True)
self.assertEqual(r.js_raw, [DEFAULT_LOG_JS_RAW])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_absolute(self):
r = resources.Resources(mode="absolute")
self.assertEqual(r.mode, "absolute")
self.assertEqual(r.dev, False)
self.assertEqual(r.js_raw, [DEFAULT_LOG_JS_RAW])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_absolute_dev(self):
r = resources.Resources(mode="absolute-dev")
self.assertEqual(r.mode, "absolute")
self.assertEqual(r.dev, True)
self.assertEqual(r.js_raw, [DEFAULT_LOG_JS_RAW])
self.assertEqual(r.css_raw, [])
self.assertEqual(r.messages, [])
def test_argument_checks(self):
self.assertRaises(ValueError, resources.Resources, "foo")
for mode in ("inline", "cdn", "server", "server-dev", "absolute", "absolute-dev"):
self.assertRaises(ValueError, resources.Resources, mode, root_dir="foo")
for mode in ("inline", "server", "server-dev", "relative", "relative-dev", "absolute", "absolute-dev"):
self.assertRaises(ValueError, resources.Resources, mode, version="foo")
for mode in ("inline", "cdn", "relative", "relative-dev", "absolute", "absolute-dev"):
self.assertRaises(ValueError, resources.Resources, mode, root_url="foo")
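# Illustrative sketch (not part of the test suite): a Resources object is
# normally handed to one of the embed helpers.  The figure and title below are
# made up, and the exact embed API may differ between Bokeh versions.
def _example_resources_usage():
    from bokeh.plotting import figure
    from bokeh.embed import file_html
    plot = figure(title="demo")
    plot.line([1, 2, 3], [3, 1, 2])
    return file_html(plot, resources.CDN, "demo")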
| srinathv/bokeh | bokeh/tests/test_resources.py | Python | bsd-3-clause | 6,016 |
import pytest
from datetime import datetime
import pytz
import platform
import os
import numpy as np
import pandas as pd
from pandas import compat, DataFrame
from pandas.compat import range
pandas_gbq = pytest.importorskip('pandas_gbq')
PROJECT_ID = None
PRIVATE_KEY_JSON_PATH = None
PRIVATE_KEY_JSON_CONTENTS = None
if compat.PY3:
DATASET_ID = 'pydata_pandas_bq_testing_py3'
else:
DATASET_ID = 'pydata_pandas_bq_testing_py2'
TABLE_ID = 'new_test'
DESTINATION_TABLE = "{0}.{1}".format(DATASET_ID + "1", TABLE_ID)
VERSION = platform.python_version()
def _skip_if_no_project_id():
if not _get_project_id():
pytest.skip(
"Cannot run integration tests without a project id")
def _skip_if_no_private_key_path():
if not _get_private_key_path():
pytest.skip("Cannot run integration tests without a "
"private key json file path")
def _in_travis_environment():
return 'TRAVIS_BUILD_DIR' in os.environ and \
'GBQ_PROJECT_ID' in os.environ
def _get_project_id():
if _in_travis_environment():
return os.environ.get('GBQ_PROJECT_ID')
return PROJECT_ID or os.environ.get('GBQ_PROJECT_ID')
def _get_private_key_path():
if _in_travis_environment():
return os.path.join(*[os.environ.get('TRAVIS_BUILD_DIR'), 'ci',
'travis_gbq.json'])
private_key_path = PRIVATE_KEY_JSON_PATH
if not private_key_path:
private_key_path = os.environ.get('GBQ_GOOGLE_APPLICATION_CREDENTIALS')
return private_key_path
def clean_gbq_environment(private_key=None):
dataset = pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=private_key)
for i in range(1, 10):
if DATASET_ID + str(i) in dataset.datasets():
dataset_id = DATASET_ID + str(i)
table = pandas_gbq.gbq._Table(_get_project_id(), dataset_id,
private_key=private_key)
for j in range(1, 20):
if TABLE_ID + str(j) in dataset.tables(dataset_id):
table.delete(TABLE_ID + str(j))
dataset.delete(dataset_id)
def make_mixed_dataframe_v2(test_size):
# create df to test for all BQ datatypes except RECORD
bools = np.random.randint(2, size=(1, test_size)).astype(bool)
flts = np.random.randn(1, test_size)
ints = np.random.randint(1, 10, size=(1, test_size))
strs = np.random.randint(1, 10, size=(1, test_size)).astype(str)
times = [datetime.now(pytz.timezone('US/Arizona'))
for t in range(test_size)]
return DataFrame({'bools': bools[0],
'flts': flts[0],
'ints': ints[0],
'strs': strs[0],
'times': times[0]},
index=range(test_size))
@pytest.mark.single
class TestToGBQIntegrationWithServiceAccountKeyPath(object):
@classmethod
def setup_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *BEFORE*
# executing *ALL* tests described below.
_skip_if_no_project_id()
_skip_if_no_private_key_path()
clean_gbq_environment(_get_private_key_path())
pandas_gbq.gbq._Dataset(_get_project_id(),
private_key=_get_private_key_path()
).create(DATASET_ID + "1")
@classmethod
def teardown_class(cls):
# - GLOBAL CLASS FIXTURES -
# put here any instruction you want to execute only *ONCE* *AFTER*
# executing all tests.
clean_gbq_environment(_get_private_key_path())
def test_roundtrip(self):
destination_table = DESTINATION_TABLE + "1"
test_size = 20001
df = make_mixed_dataframe_v2(test_size)
df.to_gbq(destination_table, _get_project_id(), chunksize=None,
private_key=_get_private_key_path())
result = pd.read_gbq("SELECT COUNT(*) AS num_rows FROM {0}"
.format(destination_table),
project_id=_get_project_id(),
private_key=_get_private_key_path())
assert result['num_rows'][0] == test_size
| pratapvardhan/pandas | pandas/tests/io/test_gbq.py | Python | bsd-3-clause | 4,286 |
import networkx as nx
import matplotlib.pyplot as plt
try:
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
except ImportError:
try:
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either "
"PyGraphviz or pydot")
G = nx.balanced_tree(3, 5)
pos = graphviz_layout(G, prog='twopi', args='')
plt.figure(figsize=(8, 8))
nx.draw(G, pos, node_size=20, alpha=0.5, node_color="blue", with_labels=False)
plt.axis('equal')
plt.savefig('circular_tree.png')
plt.show()
| jfinkels/networkx | examples/drawing/circular_tree.py | Python | bsd-3-clause | 639 |
import re
from six import text_type
"""Translate strings to and from SOAP 1.2 XML name encoding
Implements rules for mapping application defined name to XML names
specified by the w3 SOAP working group for SOAP version 1.2 in
Appendix A of "SOAP Version 1.2 Part 2: Adjuncts", W3C Working Draft
17, December 2001, <http://www.w3.org/TR/soap12-part2/#namemap>
Also see <http://www.w3.org/2000/xp/Group/xmlp-issues>.
Author: Gregory R. Warnes <[email protected]>
Date:: 2002-04-25
Version 0.9.0
"""
ident = "$Id$"
def _NCNameChar(x):
return x.isalpha() or x.isdigit() or x == "." or x == '-' or x == "_"
def _NCNameStartChar(x):
return x.isalpha() or x == "_"
def _toUnicodeHex(x):
hexval = hex(ord(x[0]))[2:]
hexlen = len(hexval)
# Make hexval have either 4 or 8 digits by prepending 0's
if (hexlen == 1):
hexval = "000" + hexval
elif (hexlen == 2):
hexval = "00" + hexval
elif (hexlen == 3):
hexval = "0" + hexval
elif (hexlen == 4):
hexval = "" + hexval
elif (hexlen == 5):
hexval = "000" + hexval
elif (hexlen == 6):
hexval = "00" + hexval
elif (hexlen == 7):
hexval = "0" + hexval
elif (hexlen == 8):
hexval = "" + hexval
else:
raise Exception("Illegal Value returned from hex(ord(x))")
return "_x" + hexval + "_"
def _fromUnicodeHex(x):
return eval(r'u"\u' + x[2:-1] + '"')
def toXMLname(string):
"""Convert string to a XML name."""
if string.find(':') != -1:
(prefix, localname) = string.split(':', 1)
else:
prefix = None
localname = string
T = text_type(localname)
N = len(localname)
X = []
for i in range(N):
if i < N - 1 and T[i] == u'_' and T[i + 1] == u'x':
X.append(u'_x005F_')
elif i == 0 and N >= 3 and \
(T[0] == u'x' or T[0] == u'X') and \
(T[1] == u'm' or T[1] == u'M') and \
(T[2] == u'l' or T[2] == u'L'):
X.append(u'_xFFFF_' + T[0])
elif (not _NCNameChar(T[i])) or (i == 0 and not _NCNameStartChar(T[i])):
X.append(_toUnicodeHex(T[i]))
else:
X.append(T[i])
if prefix:
return "%s:%s" % (prefix, u''.join(X))
return u''.join(X)
def fromXMLname(string):
"""Convert XML name to unicode string."""
retval = re.sub(r'_xFFFF_', '', string)
def fun(matchobj):
return _fromUnicodeHex(matchobj.group(0))
retval = re.sub(r'_x[0-9A-Fa-f]{4}_', fun, retval)
return retval
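# Illustrative usage sketch (not part of the original module): round-tripping
# an application-defined name that contains a character which is illegal in
# XML names.  Nothing below runs at import time.
def _example_xmlname_roundtrip():
    encoded = toXMLname(u'order total')        # the space becomes '_x0020_'
    assert encoded == u'order_x0020_total'
    assert fromXMLname(encoded) == u'order total'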
| pycontribs/wstools | wstools/XMLname.py | Python | bsd-3-clause | 2,575 |
# -*- coding: utf-8 -*-
"""
templatetricks.override_autoescaped
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Override which templates are autoescaped
http://flask.pocoo.org/snippets/41/
"""
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from flask import Flask
class JHtmlEscapingFlask(Flask):
def select_jinja_autoescape(self, filename):
if filename.endswith('.jhtml'):
return True
return Flask.select_jinja_autoescape(self, filename)
app = JHtmlEscapingFlask(__name__)
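# Hypothetical usage sketch (not part of the original snippet): with the
# subclass above, templates whose names end in ``.jhtml`` are autoescaped just
# like ``.html`` templates.  The route and template name below are made up.
from flask import render_template

@app.route('/user/<name>')
def user_profile(name):
    # ``name`` is HTML-escaped inside user.jhtml thanks to the override above
    return render_template('user.jhtml', name=name)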
| fengsp/flask-snippets | templatetricks/override_autoescaped.py | Python | bsd-3-clause | 570 |
""" Query modules mapping functions to their query strings
structured:
module_name { query_string: function_for_query }
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
import sys
import os
import math
import datetime
import logging
# logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
import random
from uuid import UUID
# Our imports
from emission.core.get_database import get_section_db, get_trip_db, get_routeCluster_db, get_alternatives_db
from . import trip_old as trip
# 0763de67-f61e-3f5d-90e7-518e69793954
# 0763de67-f61e-3f5d-90e7-518e69793954_20150421T230304-0700_0
# helper for getCanonicalTrips
def get_clusters_info(uid):
c_db = get_routeCluster_db()
s_db = get_section_db()
clusterJson = c_db.find_one({"clusters":{"$exists":True}, "user": uid})
if clusterJson is None:
return []
c_info = []
clusterSectionLists= list(clusterJson["clusters"].values())
logging.debug( "Number of section lists for user %s is %s" % (uid, len(clusterSectionLists)))
for sectionList in clusterSectionLists:
first = True
logging.debug( "Number of sections in sectionList for user %s is %s" % (uid, len(sectionList)))
if (len(sectionList) == 0):
# There's no point in returning this cluster, let's move on
continue
distributionArrays = [[] for _ in range(5)]
for section in sectionList:
section_json = s_db.find_one({"_id":section})
if first:
representative_trip = section_json
first = False
appendIfPresent(distributionArrays[0], section_json, "section_start_datetime")
appendIfPresent(distributionArrays[1], section_json, "section_end_datetime")
appendIfPresent(distributionArrays[2], section_json, "section_start_point")
appendIfPresent(distributionArrays[3], section_json, "section_end_point")
appendIfPresent(distributionArrays[4], section_json, "confirmed_mode")
c_info.append((distributionArrays, representative_trip))
return c_info
def appendIfPresent(list,element,key):
if element is not None and key in element:
list.append(element[key])
else:
logging.debug("not appending element %s with key %s" % (element, key))
class AlternativesNotFound(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#returns the top trips for the user, defaulting to the top 10 trips
def getCanonicalTrips(uid, get_representative=False): # number returned isnt used
"""
uid is a UUID object, not a string
"""
# canonical_trip_list = []
# x = 0
# if route clusters return nothing, then get common routes for user
#clusters = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]})
# c = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]})
logging.debug('UUID for canonical %s' % uid)
info = get_clusters_info(uid)
cluster_json_list = []
for (cluster, rt) in info:
json_dict = dict()
json_dict["representative_trip"] = rt
json_dict["start_point_distr"] = cluster[2]
json_dict["end_point_distr"] = cluster[3]
json_dict["start_time_distr"] = cluster[0]
json_dict["end_time_distr"] = cluster[1]
json_dict["confirmed_mode_list"] = cluster[4]
cluster_json_list.append(json_dict)
toRet = cluster_json_list
return toRet.__iter__()
#returns all trips to the user
def getAllTrips(uid):
#trips = list(get_trip_db().find({"user_id":uid, "type":"move"}))
query = {'user_id':uid, 'type':'move'}
return get_trip_db().find(query)
def getAllTrips_Date(uid, dys):
#trips = list(get_trip_db().find({"user_id":uid, "type":"move"}))
d = datetime.datetime.now() - datetime.timedelta(days=dys)
query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}}
return get_trip_db().find(query)
#returns all trips with no alternatives to the user
def getNoAlternatives(uid):
# If pipelineFlags exists then we have started alternatives, and so have
# already scheduled the query. No need to reschedule unless the query fails.
# TODO: If the query fails, then remove the pipelineFlags so that we will
# reschedule.
query = {'user_id':uid, 'type':'move', 'pipelineFlags': {'$exists': False}}
return get_trip_db().find(query)
def getNoAlternativesPastMonth(uid):
d = datetime.datetime.now() - datetime.timedelta(days=30)
query = {'user_id':uid, 'type':'move',
'trip_start_datetime':{"$gt":d},
'pipelineFlags': {'$exists': False}}
return get_trip_db().find(query)
# Returns the trips that are suitable for training
# Currently this is:
# - trips that have alternatives, and
# - have not yet been included in a training set
def getTrainingTrips(uid):
return getTrainingTrips_Date(uid, 30)
query = {'user_id':uid, 'type':'move'}
return get_trip_db().find(query)
def getTrainingTrips_Date(uid, dys):
d = datetime.datetime.now() - datetime.timedelta(days=dys)
query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}, "pipelineFlags":{"$exists":True}}
#query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}}
#print get_trip_db().count_documents(query)
return get_trip_db().find(query)
def getAlternativeTrips(trip_id):
#TODO: clean up datetime, and queries here
#d = datetime.datetime.now() - datetime.timedelta(days=6)
#query = {'trip_id':trip_id, 'trip_start_datetime':{"$gt":d}}
query = {'trip_id':trip_id}
alternatives = get_alternatives_db().find(query)
if alternatives.estimated_document_count() > 0:
logging.debug("Number of alternatives for trip %s is %d" % (trip_id, alternatives.estimated_document_count()))
return alternatives
raise AlternativesNotFound("No Alternatives Found")
def getRecentTrips(uid):
raise NotImplementedError()
def getTripsThroughMode(uid):
raise NotImplementedError()
modules = {
# Trip Module
'trips': {
'get_canonical': getCanonicalTrips,
'get_all': getAllTrips,
'get_no_alternatives': getNoAlternatives,
'get_no_alternatives_past_month': getNoAlternativesPastMonth,
'get_most_recent': getRecentTrips,
'get_trips_by_mode': getTripsThroughMode},
# Utility Module
'utility': {
'get_training': getTrainingTrips
},
# Recommender Module
'recommender': {
'get_improve': getCanonicalTrips
},
#Perturbation Module
'perturbation': {},
#Alternatives Module
# note: uses a different collection than section_db
'alternatives': {
'get_alternatives': getAlternativeTrips
}
}
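# Illustrative sketch (not part of the original module): dispatching a query
# through the ``modules`` mapping above.  The module/query names are the ones
# defined here; the helper itself is hypothetical and never called.
def _example_dispatch(uid, module_name='trips', query_name='get_all'):
    query_fn = modules[module_name][query_name]   # look the query up by name
    return list(query_fn(uid))                    # run it against the trip db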
| e-mission/e-mission-server | emission/core/wrapper/filter_modules.py | Python | bsd-3-clause | 7,166 |
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""
RINGFILTER determines the center coordinates of a ring, bins the ring radially
and computes its power spectrum, and allows the user to select a smoothing
filter for the ring. It uses T. Williams code. The code assumes all the files
are in the same directory. Also assumes that if there is a config file, it is
also in the same directory as the data. Note that this config file is in the
original FORTRAN code format so that the user does not have to write another
file.
Updates:
20100706
* First wrote the code
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os
import sys
import numpy as np
#import pyfits
from pyraf import iraf
from pyraf.iraf import pysalt
import saltsafekey
import saltsafeio
import fpsafeio
from saltsafelog import logging
from salterror import SaltIOError
# This reads the FORTRAN config file if it exists
from fortranfp import ringfilter_wrapper
from fortranfp.ringfilter_wrapper import getpfp
debug=True
def saltfpringfilter(axc,ayc,arad,rxc,ryc,filterfreq,filterwidth,itmax,conv, fitwidth,image,logfile,useconfig,configfile,verbose):
""" Determines the center coordinates of a ring, bins the ring radially and computes its power spectrum, and allows the user to select a smoothing filter for the ring. """
# default parameter values are set up in the pyraf .par file. The values used are then changed if a FORTRAN config file exists and the user elects to override the pyraf .par file.
# Is the input FORTRAN config file specified?
# If it is blank, then it will be ignored.
if useconfig:
configfile = configfile.strip()
if len(configfile) > 0:
#check exists
saltsafeio.fileexists(configfile)
# read updated parameters from the file
array=getpfp(configfile,"axc")
s=len(array)
flag = array[s-1]
if flag == 1:
axc=float(array[0])
array=getpfp(configfile,"ayc")
s=len(array)
flag = array[s-1]
if flag == 1:
ayc=float(array[0])
array=getpfp(configfile,"arad")
s=len(array)
flag = array[s-1]
if flag == 1:
arad=float(array[0])
array=getpfp(configfile,"rxc")
s=len(array)
flag = array[s-1]
if flag == 1:
rxc=float(array[0])
array=getpfp(configfile,"ryc")
s=len(array)
flag = array[s-1]
if flag == 1:
ryc=float(array[0])
array=getpfp(configfile,"calring_filter_width")
s=len(array)
flag = array[s-1]
if flag == 1:
filterwidth=int(array[0])
array=getpfp(configfile,"calring_filter_freq")
s=len(array)
flag = array[s-1]
if flag == 1:
filterfreq=int(array[0])
array=getpfp(configfile,"calring_itmax")
s=len(array)
flag = array[s-1]
if flag == 1:
itmax=int(array[0])
array=getpfp(configfile,"calring_conv")
s=len(array)
flag = array[s-1]
if flag == 1:
conv=float(array[0])
array=getpfp(configfile,"calring_fitwidth")
s=len(array)
flag = array[s-1]
if flag == 1:
fitwidth=float(array[0])
# getting paths for filenames
pathin = os.path.dirname(image)
basein = os.path.basename(image)
pathlog = os.path.dirname(logfile)
baselog = os.path.basename(logfile)
# forcing logfiles to be created in the same directory as the input data
# (we change to this directory once starting the fortran code)
if len(pathin) > 0:
logfile = baselog
# start log now that all parameter are set up
with logging(logfile, debug) as log:
# Some basic checks, many tests are done in the FORTRAN code itself
# is the input file specified?
saltsafeio.filedefined('Input',image)
# if the input file is a file, does it exist?
if basein[0] != '@':
saltsafeio.fileexists(image)
infile = image
# if the input file is a list, throw an error
if basein[0] == '@':
raise SaltIOError(basein + ' list input instead of a file' )
# optionally update the FORTRAN config file with new values - not implemented currently
# If all looks OK, run the FORTRAN code
if len(pathin) > 0:
dir = pathin
else:
dir = './'
infile = basein
print dir, infile, 'input directory and input file'
# Get current working directory as the Fortran code changes dir
startdir = os.getcwd()
ringfilter_wrapper.ringfilter(dir,axc, ayc,arad, rxc,ryc,filterfreq,filterwidth,itmax,conv,fitwidth,infile)
# go back to starting directory
os.chdir(startdir)
# -----------------------------------------------------------
# main code
parfile = iraf.osfn("saltfp$saltfpringfilter.par")
t = iraf.IrafTaskFactory(taskname="saltfpringfilter",value=parfile,function=saltfpringfilter,pkgname='saltfp')
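# Illustrative sketch (not part of the task): the repeated
# "re-read a keyword with getpfp and keep the old value unless the flag is
# set" blocks in saltfpringfilter() could be expressed once with a helper like
# this hypothetical one; it is not used anywhere in this file.
def _example_override_from_config(configfile, keyword, current, cast=float):
    array = getpfp(configfile, keyword)
    flag = array[len(array) - 1]
    if flag == 1:
        return cast(array[0])
    return current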
| saltastro/pysalt | saltfp/saltfpringfilter.py | Python | bsd-3-clause | 7,647 |
__author__ = 'keltonhalbert, wblumberg'
from sharppy.viz import plotSkewT, plotHodo, plotText, plotAnalogues
from sharppy.viz import plotThetae, plotWinds, plotSpeed, plotKinematics #, plotGeneric
from sharppy.viz import plotSlinky, plotWatch, plotAdvection, plotSTP, plotWinter
from sharppy.viz import plotSHIP, plotSTPEF, plotFire, plotVROT
from PySide.QtCore import *
from PySide.QtGui import *
import sharppy.sharptab.profile as profile
import sharppy.sharptab as tab
import sharppy.io as io
from datetime import datetime, timedelta
import numpy as np
import platform
from os.path import expanduser
import os
from sharppy.version import __version__, __version_name__
class SPCWidget(QWidget):
"""
This will create the full SPC window, handle the organization
of the insets, and handle all click/key events and features.
"""
inset_generators = {
'SARS':plotAnalogues,
'STP STATS':plotSTP,
'COND STP':plotSTPEF,
'WINTER':plotWinter,
'FIRE':plotFire,
'SHIP':plotSHIP,
'VROT':plotVROT,
}
inset_names = {
'SARS':'Sounding Analogues',
'STP STATS':'Sig-Tor Stats',
'COND STP':'EF-Scale Probs (Sig-Tor)',
'WINTER':'Winter Weather',
'FIRE':'Fire Weather',
'SHIP':'Sig-Hail Stats',
'VROT':'EF-Scale Probs (V-Rot)',
}
def __init__(self, **kwargs):
parent = kwargs.get('parent', None)
super(SPCWidget, self).__init__(parent=parent)
"""
"""
## these are the keyword arguments used to define what
## sort of profile is being viewed
self.prof_collections = []
self.prof_ids = []
self.default_prof = None
self.pc_idx = 0
self.config = kwargs.get("cfg")
self.dgz = False
self.mode = ""
## these are used to display profiles
self.parcel_type = "MU"
self.coll_observed = False
if not self.config.has_section('insets'):
self.config.add_section('insets')
self.config.set('insets', 'right_inset', 'STP STATS')
self.config.set('insets', 'left_inset', 'SARS')
if not self.config.has_section('parcel_types'):
self.config.add_section('parcel_types')
self.config.set('parcel_types', 'pcl1', 'SFC')
self.config.set('parcel_types', 'pcl2', 'ML')
self.config.set('parcel_types', 'pcl3', 'FCST')
self.config.set('parcel_types', 'pcl4', 'MU')
if not self.config.has_option('paths', 'save_img'):
self.config.set('paths', 'save_img', expanduser('~'))
self.config.set('paths', 'save_txt', expanduser('~'))
## these are the boolean flags used throughout the program
self.swap_inset = False
## initialize empty variables to hold objects that will be
## used later
self.left_inset_ob = None
self.right_inset_ob = None
## these are used for insets and inset swapping
insets = sorted(SPCWidget.inset_names.items(), key=lambda i: i[1])
inset_ids, inset_names = zip(*insets)
self.available_insets = inset_ids
self.left_inset = self.config.get('insets', 'left_inset')
self.right_inset = self.config.get('insets', 'right_inset')
self.insets = {}
self.parcel_types = [self.config.get('parcel_types', 'pcl1'), self.config.get('parcel_types', 'pcl2'), \
self.config.get('parcel_types', 'pcl3'),self.config.get('parcel_types', 'pcl4')]
## initialize the rest of the window attributes, layout managers, etc
self.setStyleSheet("QWidget {background-color: rgb(0, 0, 0);}")
## set the the whole window's layout manager
self.grid = QGridLayout()
self.grid.setContentsMargins(1,1,1,1)
self.grid.setHorizontalSpacing(0)
self.grid.setVerticalSpacing(2)
self.setLayout(self.grid)
## handle the upper right portion of the window...
## hodograph, SRWinds, Storm Slinky, theta-e all go in this frame
self.urparent = QFrame()
self.urparent_grid = QGridLayout()
self.urparent_grid.setContentsMargins(0, 0, 0, 0)
self.urparent_grid.setVerticalSpacing(0)
self.urparent.setLayout(self.urparent_grid)
self.ur = QFrame()
self.ur.setStyleSheet("QFrame {"
" background-color: rgb(0, 0, 0);"
" border-width: 0px;"
" border-style: solid;"
" border-color: rgb(255, 255, 255);"
" margin: 0px;}")
self.brand = QLabel("SHARPpy Beta v%s %s" % (__version__, __version_name__))
self.brand.setAlignment(Qt.AlignRight)
self.brand.setStyleSheet("QFrame {"
" background-color: rgb(0, 0, 0);"
" text-align: right;"
" padding-top: 4px;"
" padding-bottom: 4px;"
" font-size: 11px;"
" color: #FFFFFF;}")
## this layout manager will handle the upper right portion of the window
self.grid2 = QGridLayout()
self.grid2.setHorizontalSpacing(0)
self.grid2.setVerticalSpacing(0)
self.grid2.setContentsMargins(0, 0, 0, 0)
self.ur.setLayout(self.grid2)
self.urparent_grid.addWidget(self.brand, 0, 0, 1, 0)
self.urparent_grid.addWidget(self.ur, 1, 0, 50, 0)
## add the upper-right frame to the main frame
self.grid.addWidget(self.urparent, 0, 1, 3, 1)
## Handle the Text Areas
self.text = QFrame()
self.text.setStyleSheet("QWidget {"
" background-color: rgb(0, 0, 0);"
" border-width: 2px;"
" border-style: solid;"
" border-color: #3399CC;}")
self.grid3 = QGridLayout()
self.grid3.setHorizontalSpacing(0)
self.grid3.setContentsMargins(0, 0, 0, 0)
self.text.setLayout(self.grid3)
## set to menu stuff
self.setUpdatesEnabled(True)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showCursorMenu)
## initialize the data frames
self.initData()
self.loadWidgets()
def getParcelObj(self, prof, name):
if name == "SFC":
return prof.sfcpcl
elif name == "ML":
return prof.mlpcl
elif name == "FCST":
return prof.fcstpcl
elif name == "MU":
return prof.mupcl
elif name == 'EFF':
return prof.effpcl
elif name == "USER":
return prof.usrpcl
def getParcelName(self, prof, pcl):
if pcl == prof.sfcpcl:
return "SFC"
elif pcl == prof.mlpcl:
return "ML"
elif pcl == prof.fcstpcl:
return "FCST"
elif pcl == prof.mupcl:
return "MU"
elif pcl == prof.effpcl:
return "EFF"
elif pcl == prof.usrpcl:
return "USER"
def saveimage(self):
path = self.config.get('paths', 'save_img')
file_types = "PNG (*.png)"
file_name, result = QFileDialog.getSaveFileName(self, "Save Image", path, file_types)
if result:
pixmap = QPixmap.grabWidget(self)
pixmap.save(file_name, 'PNG', 100)
self.config.set('paths', 'save_img', os.path.dirname(file_name))
def savetext(self):
path = self.config.get('paths', 'save_txt')
file_types = "TXT (*.txt)"
file_name, result = QFileDialog.getSaveFileName(self, "Save Sounding Text", path, file_types)
if result:
self.default_prof.toFile(file_name)
self.config.set('paths', 'save_txt', os.path.dirname(file_name))
def initData(self):
"""
Initializes all the widgets for the window.
This gets initially called by __init__
:return:
"""
self.sound = plotSkewT(dgz=self.dgz)
self.hodo = plotHodo()
## initialize the non-swappable insets
self.speed_vs_height = plotSpeed()
self.inferred_temp_advection = plotAdvection()
self.storm_slinky = plotSlinky()
self.thetae_vs_pressure = plotThetae()
self.srwinds_vs_height = plotWinds()
self.watch_type = plotWatch()
self.convective = plotText(self.parcel_types)
self.kinematic = plotKinematics()
# intialize swappable insets
for inset, inset_gen in SPCWidget.inset_generators.iteritems():
self.insets[inset] = inset_gen()
self.right_inset_ob = self.insets[self.right_inset]
self.left_inset_ob = self.insets[self.left_inset]
# Connect signals to slots
self.convective.updatepcl.connect(self.updateParcel)
self.sound.parcel.connect(self.defineUserParcel)
self.sound.modified.connect(self.modifyProf)
self.sound.reset.connect(self.resetProfModifications)
self.hodo.modified.connect(self.modifyProf)
self.hodo.reset.connect(self.resetProfModifications)
self.insets["SARS"].updatematch.connect(self.updateSARS)
def addProfileCollection(self, prof_col, prof_id, focus=True):
self.prof_collections.append(prof_col)
self.prof_ids.append(prof_id)
self.sound.addProfileCollection(prof_col)
self.hodo.addProfileCollection(prof_col)
if focus:
self.pc_idx = len(self.prof_collections) - 1
if not prof_col.getMeta('observed'):
self.coll_observed = False
self.sound.setAllObserved(self.coll_observed, update_gui=False)
self.hodo.setAllObserved(self.coll_observed, update_gui=False)
cur_dt = self.prof_collections[self.pc_idx].getCurrentDate()
for prof_col in self.prof_collections:
if not prof_col.getMeta('observed'):
prof_col.setCurrentDate(cur_dt)
self.updateProfs()
@Slot(str)
def setProfileCollection(self, prof_id):
try:
self.pc_idx = self.prof_ids.index(prof_id)
except ValueError:
print "Hmmm, that profile doesn't exist to be focused ..."
return
cur_dt = self.prof_collections[self.pc_idx].getCurrentDate()
for prof_col in self.prof_collections:
if not prof_col.getMeta('observed'):
prof_col.setCurrentDate(cur_dt)
self.updateProfs()
def rmProfileCollection(self, prof_id):
try:
pc_idx = self.prof_ids.index(prof_id)
except ValueError:
print "Hmmm, that profile doesn't exist to be removed ..."
prof_col = self.prof_collections.pop(pc_idx)
self.prof_ids.pop(pc_idx)
self.sound.rmProfileCollection(prof_col)
self.hodo.rmProfileCollection(prof_col)
# If we've removed an analog, remove it from the profile it's an analog to.
if prof_col.hasMeta('filematch'):
filematch = prof_col.getMeta('filematch')
for pc in self.prof_collections:
if pc.hasMeta('analogfile'):
keys, vals = zip(*pc.getMeta('analogfile').items())
if filematch in vals:
keys = list(keys); vals = list(vals)
idx = vals.index(filematch)
vals.pop(idx)
keys.pop(idx)
pc.setMeta('analogfile', dict(zip(keys, vals)))
self.insets['SARS'].clearSelection()
if self.pc_idx == pc_idx:
self.pc_idx = 0
elif self.pc_idx > pc_idx:
self.pc_idx -= 1
self.updateProfs()
def isAllObserved(self):
return all( pc.getMeta('observed') for pc in self.prof_collections )
def isInterpolated(self):
return self.prof_collections[self.pc_idx].isInterpolated()
def updateProfs(self):
prof_col = self.prof_collections[self.pc_idx]
self.default_prof = prof_col.getHighlightedProf()
# update the profiles
self.sound.setActiveCollection(self.pc_idx, update_gui=False)
self.hodo.setActiveCollection(self.pc_idx)
self.storm_slinky.setProf(self.default_prof)
self.inferred_temp_advection.setProf(self.default_prof)
self.speed_vs_height.setProf(self.default_prof)
self.srwinds_vs_height.setProf(self.default_prof)
self.thetae_vs_pressure.setProf(self.default_prof)
self.watch_type.setProf(self.default_prof)
self.convective.setProf(self.default_prof)
self.kinematic.setProf(self.default_prof)
for inset in self.insets.keys():
self.insets[inset].setProf(self.default_prof)
# Update the parcels to match the new profiles
parcel = self.getParcelObj(self.default_prof, self.parcel_type)
self.sound.setParcel(parcel)
self.storm_slinky.setParcel(parcel)
@Slot(tab.params.Parcel)
def updateParcel(self, pcl):
self.parcel_type = self.getParcelName(self.default_prof, pcl)
self.sound.setParcel(pcl)
self.storm_slinky.setParcel(pcl)
self.config.set('parcel_types', 'pcl1', self.convective.pcl_types[0])
self.config.set('parcel_types', 'pcl2', self.convective.pcl_types[1])
self.config.set('parcel_types', 'pcl3', self.convective.pcl_types[2])
self.config.set('parcel_types', 'pcl4', self.convective.pcl_types[3])
@Slot(str)
def updateSARS(self, filematch):
prof_col = self.prof_collections[self.pc_idx]
dec = io.spc_decoder.SPCDecoder(filematch)
match_col = dec.getProfiles()
match_col.setMeta('model', 'Analog')
match_col.setMeta('run', prof_col.getCurrentDate())
match_col.setMeta('fhour', None)
match_col.setMeta('observed', True)
match_col.setMeta('filematch', filematch)
match_col.setAnalogToDate(prof_col.getCurrentDate())
dt = prof_col.getCurrentDate()
if prof_col.hasMeta('analogfile'):
analogfiles = prof_col.getMeta('analogfile')
analogfiles[dt] = filematch
else:
analogfiles = {dt:filematch}
prof_col.setMeta('analogfile', analogfiles)
self.parentWidget().addProfileCollection(match_col, focus=False)
@Slot(tab.params.Parcel)
def defineUserParcel(self, parcel):
self.prof_collections[self.pc_idx].defineUserParcel(parcel)
self.updateProfs()
self.setFocus()
@Slot(int, dict)
def modifyProf(self, idx, kwargs):
self.prof_collections[self.pc_idx].modify(idx, **kwargs)
self.updateProfs()
self.setFocus()
def interpProf(self):
self.prof_collections[self.pc_idx].interp()
self.updateProfs()
self.setFocus()
@Slot(list)
def resetProfModifications(self, args):
self.prof_collections[self.pc_idx].resetModification(*args)
self.updateProfs()
self.setFocus()
def resetProfInterpolation(self):
self.prof_collections[self.pc_idx].resetInterpolation()
self.updateProfs()
self.setFocus()
@Slot()
def toggleCollectObserved(self):
self.coll_observed = not self.coll_observed
self.sound.setAllObserved(self.coll_observed)
self.hodo.setAllObserved(self.coll_observed)
def loadWidgets(self):
## add the upper-right window insets
self.grid2.addWidget(self.speed_vs_height, 0, 0, 11, 3)
self.grid2.addWidget(self.inferred_temp_advection, 0, 3, 11, 2)
self.grid2.addWidget(self.hodo, 0, 5, 8, 24)
self.grid2.addWidget(self.storm_slinky, 8, 5, 3, 6)
self.grid2.addWidget(self.thetae_vs_pressure, 8, 11, 3, 6)
self.grid2.addWidget(self.srwinds_vs_height, 8, 17, 3, 6)
self.grid2.addWidget(self.watch_type, 8, 23, 3, 6)
# Draw the kinematic and convective insets
self.grid3.addWidget(self.convective, 0, 0)
self.grid3.addWidget(self.kinematic, 0, 1)
# Set Left Inset
self.grid3.addWidget(self.left_inset_ob, 0, 2)
# Set Right Inset
self.grid3.addWidget(self.right_inset_ob, 0, 3)
## do a check for setting the dendretic growth zone
if self.left_inset == "WINTER" or self.right_inset == "WINTER":
self.sound.setDGZ(True)
self.dgz = True
self.grid.addWidget(self.sound, 0, 0, 3, 1)
self.grid.addWidget(self.text, 3, 0, 1, 2)
def advanceTime(self, direction):
if len(self.prof_collections) == 0 or self.coll_observed:
return
prof_col = self.prof_collections[self.pc_idx]
if prof_col.getMeta('observed'):
cur_dt = prof_col.getCurrentDate()
cur_loc = prof_col.getMeta('loc')
            idxs, dts = zip(*sorted(
                ((idx, pc.getCurrentDate())
                 for idx, pc in enumerate(self.prof_collections)
                 if pc.getMeta('loc') == cur_loc and pc.getMeta('observed')),
                key=lambda x: x[1]))
dt_idx = dts.index(cur_dt)
dt_idx = (dt_idx + direction) % len(dts)
self.pc_idx = idxs[dt_idx]
cur_dt = self.prof_collections[self.pc_idx].getCurrentDate()
else:
cur_dt = prof_col.advanceTime(direction)
for prof_col in self.prof_collections:
if not prof_col.getMeta('observed'):
prof_col.setCurrentDate(cur_dt)
self.parcel_types = self.convective.pcl_types
self.updateProfs()
prof_col = self.prof_collections[self.pc_idx]
if prof_col.hasMeta('analogfile'):
match = prof_col.getMeta('analogfile')
dt = prof_col.getCurrentDate()
if dt in match:
self.insets['SARS'].setSelection(match[dt])
else:
self.insets['SARS'].clearSelection()
else:
self.insets['SARS'].clearSelection()
def swapProfCollections(self):
# See if we have any other observed profiles loaded at this time.
prof_col = self.prof_collections[self.pc_idx]
dt = prof_col.getCurrentDate()
        idxs, pcs = zip(*[ (idx, pc) for idx, pc in enumerate(self.prof_collections)
                           if pc.getCurrentDate() == dt or self.coll_observed ])
loc_idx = pcs.index(prof_col)
loc_idx = (loc_idx + 1) % len(pcs)
self.pc_idx = idxs[loc_idx]
self.updateProfs()
if self.prof_collections[self.pc_idx].hasMeta('analogfile'):
match = self.prof_collections[self.pc_idx].getMeta('analogfile')
dt = prof_col.getCurrentDate()
if dt in match:
self.insets['SARS'].setSelection(match[dt])
else:
self.insets['SARS'].clearSelection()
else:
self.insets['SARS'].clearSelection()
def closeEvent(self, e):
self.sound.closeEvent(e)
for prof_coll in self.prof_collections:
prof_coll.cancelCopy()
def makeInsetMenu(self, *exclude):
# This will make the menu of the available insets.
self.popupmenu=QMenu("Inset Menu")
self.menu_ag = QActionGroup(self, exclusive=True)
for inset in self.available_insets:
if inset not in exclude:
inset_action = QAction(self)
inset_action.setText(SPCWidget.inset_names[inset])
inset_action.setData(inset)
inset_action.setCheckable(True)
inset_action.triggered.connect(self.swapInset)
a = self.menu_ag.addAction(inset_action)
self.popupmenu.addAction(a)
def showCursorMenu(self, pos):
self.makeInsetMenu(self.left_inset, self.right_inset)
if self.childAt(pos.x(), pos.y()) is self.right_inset_ob:
self.inset_to_swap = "RIGHT"
self.popupmenu.popup(self.mapToGlobal(pos))
self.setFocus()
elif self.childAt(pos.x(), pos.y()) is self.left_inset_ob:
self.inset_to_swap = "LEFT"
self.popupmenu.popup(self.mapToGlobal(pos))
self.setFocus()
def swapInset(self):
## This will swap either the left or right inset depending on whether or not the
## self.inset_to_swap value is LEFT or RIGHT.
a = self.menu_ag.checkedAction()
if self.inset_to_swap == "LEFT":
if self.left_inset == "WINTER" and self.dgz:
self.sound.setDGZ(False)
self.dgz = False
# Delete and re-make the inset. For some stupid reason, pyside/QT forces you to
# delete something you want to remove from the layout.
self.left_inset_ob.deleteLater()
self.insets[self.left_inset] = SPCWidget.inset_generators[self.left_inset]()
self.insets[self.left_inset].setProf(self.default_prof)
self.left_inset = a.data()
self.left_inset_ob = self.insets[self.left_inset]
self.grid3.addWidget(self.left_inset_ob, 0, 2)
self.config.set('insets', 'left_inset', self.left_inset)
elif self.inset_to_swap == "RIGHT":
if self.right_inset == "WINTER" and self.dgz:
self.sound.setDGZ(False)
self.dgz = False
# Delete and re-make the inset. For some stupid reason, pyside/QT forces you to
# delete something you want to remove from the layout.
self.right_inset_ob.deleteLater()
self.insets[self.right_inset] = SPCWidget.inset_generators[self.right_inset]()
self.insets[self.right_inset].setProf(self.default_prof)
self.right_inset = a.data()
self.right_inset_ob = self.insets[self.right_inset]
self.grid3.addWidget(self.right_inset_ob, 0, 3)
self.config.set('insets', 'right_inset', self.right_inset)
if a.data() == "WINTER":
self.sound.setDGZ(True)
self.dgz = True
self.setFocus()
self.update()
class SPCWindow(QMainWindow):
closed = Signal()
def __init__(self, **kwargs):
parent = kwargs.get('parent', None)
super(SPCWindow, self).__init__()
self.menu_items = []
self.picker_window = parent
self.__initUI(**kwargs)
def __initUI(self, **kwargs):
kwargs['parent'] = self
self.spc_widget = SPCWidget(**kwargs)
self.setCentralWidget(self.spc_widget)
self.createMenuBar()
title = 'SHARPpy: Sounding and Hodograph Analysis and Research Program '
title += 'in Python'
self.setWindowTitle(title)
self.setStyleSheet("QMainWindow { background-color: rgb(0, 0, 0); }")
## handle the attribute of the main window
if platform.system() == 'Windows':
self.setGeometry(10,30,1180,800)
else:
self.setGeometry(0, 0, 1180, 800)
self.show()
self.raise_()
def createMenuBar(self):
bar = self.menuBar()
filemenu = bar.addMenu("File")
saveimage = QAction("Save Image", self, shortcut=QKeySequence("Ctrl+S"))
saveimage.triggered.connect(self.spc_widget.saveimage)
filemenu.addAction(saveimage)
savetext = QAction("Save Text", self, shortcut=QKeySequence("Ctrl+Shift+S"))
savetext.triggered.connect(self.spc_widget.savetext)
filemenu.addAction(savetext)
self.profilemenu = bar.addMenu("Profiles")
self.allobserved = QAction("Collect Observed", self, checkable=True, shortcut=QKeySequence("C"))
self.allobserved.triggered.connect(self.spc_widget.toggleCollectObserved)
self.profilemenu.addAction(self.allobserved)
self.interpolate = QAction("Interpolate Focused Profile", self, shortcut=QKeySequence("I"))
self.interpolate.triggered.connect(self.interpProf)
self.profilemenu.addAction(self.interpolate)
self.resetinterp = QAction("Reset Interpolation", self, shortcut=QKeySequence("I"))
self.resetinterp.triggered.connect(self.resetProf)
self.resetinterp.setVisible(False)
self.profilemenu.addAction(self.resetinterp)
self.profilemenu.addSeparator()
self.focus_mapper = QSignalMapper(self)
self.remove_mapper = QSignalMapper(self)
self.focus_mapper.mapped[str].connect(self.spc_widget.setProfileCollection)
self.remove_mapper.mapped[str].connect(self.rmProfileCollection)
def createProfileMenu(self, prof_col):
menu_name = self.createMenuName(prof_col)
prof_menu = self.profilemenu.addMenu(menu_name)
focus = QAction("Focus", self)
focus.triggered.connect(self.focus_mapper.map)
self.focus_mapper.setMapping(focus, menu_name)
prof_menu.addAction(focus)
remove = QAction("Remove", self)
remove.triggered.connect(self.remove_mapper.map)
self.remove_mapper.setMapping(remove, menu_name)
prof_menu.addAction(remove)
if len(self.menu_items) == 0:
remove.setVisible(False)
self.menu_items.append(prof_menu)
def removeProfileMenu(self, menu_name):
menu_items = [ mitem for mitem in self.menu_items if mitem.title() == menu_name ]
for mitem in menu_items:
mitem.menuAction().setVisible(False)
def addProfileCollection(self, prof_col, focus=True):
menu_name = self.createMenuName(prof_col)
if any( mitem.title() == menu_name and mitem.menuAction().isVisible() for mitem in self.menu_items ):
self.spc_widget.setProfileCollection(menu_name)
return
if not prof_col.getMeta('observed'):
self.allobserved.setDisabled(True)
self.allobserved.setChecked(False)
self.createProfileMenu(prof_col)
visible_mitems = [ mitem for mitem in self.menu_items if mitem.menuAction().isVisible() ]
if len(visible_mitems) > 1:
actions = visible_mitems[0].actions()
names = [ act.text() for act in actions ]
actions[names.index("Remove")].setVisible(True)
try:
self.spc_widget.addProfileCollection(prof_col, menu_name, focus=focus)
except Exception as exc:
self.abortProfileAdd(menu_name, str(exc))
@Slot(str)
def rmProfileCollection(self, menu_name):
self.removeProfileMenu(menu_name)
self.spc_widget.rmProfileCollection(menu_name)
if self.spc_widget.isAllObserved():
self.allobserved.setDisabled(False)
visible_mitems = [ mitem for mitem in self.menu_items if mitem.menuAction().isVisible() ]
if len(visible_mitems) == 1:
actions = visible_mitems[0].actions()
names = [ act.text() for act in actions ]
actions[names.index("Remove")].setVisible(False)
def abortProfileAdd(self, menu_name, exc):
msgbox = QMessageBox()
msgbox.setText("An error has occurred while retrieving the data.")
msgbox.setInformativeText("Try another site or model or try again later.")
msgbox.setDetailedText(exc)
msgbox.setIcon(QMessageBox.Critical)
msgbox.exec_()
if len(self.menu_items) == 1:
self.focusPicker()
self.close()
else:
self.rmProfileCollection(menu_name)
def keyPressEvent(self, e):
#TODO: Up and down keys to loop through profile collection members.
if e.key() == Qt.Key_Left:
self.spc_widget.advanceTime(-1)
self.setInterpolated(self.spc_widget.isInterpolated())
elif e.key() == Qt.Key_Right:
self.spc_widget.advanceTime(1)
self.setInterpolated(self.spc_widget.isInterpolated())
elif e.key() == Qt.Key_Space:
# Swap the profile collections
self.spc_widget.swapProfCollections()
self.setInterpolated(self.spc_widget.isInterpolated())
elif e.matches(QKeySequence.Save):
# Save an image
self.spc_widget.saveimage()
elif e.key() == Qt.Key_W:
self.focusPicker()
def closeEvent(self, e):
self.spc_widget.closeEvent(e)
self.closed.emit()
def createMenuName(self, prof_col):
pc_loc = prof_col.getMeta('loc')
pc_date = prof_col.getMeta('run').strftime("%d/%HZ")
pc_model = prof_col.getMeta('model')
return "%s (%s %s)" % (pc_loc, pc_date, pc_model)
def interpProf(self):
self.setInterpolated(True)
self.spc_widget.interpProf()
def resetProf(self):
self.setInterpolated(False)
self.spc_widget.resetProfInterpolation()
def setInterpolated(self, is_interpolated):
self.resetinterp.setVisible(is_interpolated)
self.interpolate.setVisible(not is_interpolated)
def focusPicker(self):
if self.picker_window is not None:
self.picker_window.activateWindow()
self.picker_window.setFocus()
self.picker_window.raise_()
| djgagne/SHARPpy | sharppy/viz/SPCWindow.py | Python | bsd-3-clause | 29,262 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Amazon EC2, Eucalyptus and Nimbus drivers.
"""
from __future__ import with_statement
import sys
import base64
import os
import copy
from xml.etree import ElementTree as ET
from libcloud.utils.py3 import b
from libcloud.utils.xml import fixxpath, findtext, findattr, findall
from libcloud.common.aws import AWSBaseResponse, SignedAWSConnection
from libcloud.common.types import (InvalidCredsError, MalformedResponseError,
LibcloudError)
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize
from libcloud.compute.base import NodeImage, StorageVolume
API_VERSION = '2010-08-31'
NAMESPACE = 'http://ec2.amazonaws.com/doc/%s/' % (API_VERSION)
"""
Sizes must be hardcoded, because Amazon doesn't provide an API to fetch them.
From http://aws.amazon.com/ec2/instance-types/
"""
INSTANCE_TYPES = {
't1.micro': {
'id': 't1.micro',
'name': 'Micro Instance',
'ram': 613,
'disk': 15,
'bandwidth': None
},
'm1.small': {
'id': 'm1.small',
'name': 'Small Instance',
'ram': 1740,
'disk': 160,
'bandwidth': None
},
'm1.medium': {
'id': 'm1.medium',
'name': 'Medium Instance',
'ram': 3700,
'disk': 410,
'bandwidth': None
},
'm1.large': {
'id': 'm1.large',
'name': 'Large Instance',
'ram': 7680,
'disk': 850,
'bandwidth': None
},
'm1.xlarge': {
'id': 'm1.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': 1690,
'bandwidth': None
},
'c1.medium': {
'id': 'c1.medium',
'name': 'High-CPU Medium Instance',
'ram': 1740,
'disk': 350,
'bandwidth': None
},
'c1.xlarge': {
'id': 'c1.xlarge',
'name': 'High-CPU Extra Large Instance',
'ram': 7680,
'disk': 1690,
'bandwidth': None
},
'm2.xlarge': {
'id': 'm2.xlarge',
'name': 'High-Memory Extra Large Instance',
'ram': 17510,
'disk': 420,
'bandwidth': None
},
'm2.2xlarge': {
'id': 'm2.2xlarge',
'name': 'High-Memory Double Extra Large Instance',
'ram': 35021,
'disk': 850,
'bandwidth': None
},
'm2.4xlarge': {
'id': 'm2.4xlarge',
'name': 'High-Memory Quadruple Extra Large Instance',
'ram': 70042,
'disk': 1690,
'bandwidth': None
},
'm3.xlarge': {
'id': 'm3.xlarge',
'name': 'Extra Large Instance',
'ram': 15360,
'disk': None,
'bandwidth': None
},
'm3.2xlarge': {
'id': 'm3.2xlarge',
'name': 'Double Extra Large Instance',
'ram': 30720,
'disk': None,
'bandwidth': None
},
'cg1.4xlarge': {
'id': 'cg1.4xlarge',
'name': 'Cluster GPU Quadruple Extra Large Instance',
'ram': 22528,
'disk': 1690,
'bandwidth': None
},
'cc1.4xlarge': {
'id': 'cc1.4xlarge',
'name': 'Cluster Compute Quadruple Extra Large Instance',
'ram': 23552,
'disk': 1690,
'bandwidth': None
},
'cc2.8xlarge': {
'id': 'cc2.8xlarge',
'name': 'Cluster Compute Eight Extra Large Instance',
'ram': 63488,
'disk': 3370,
'bandwidth': None
},
'cr1.8xlarge': {
'id': 'cr1.8xlarge',
'name': 'High Memory Cluster Eight Extra Large',
'ram': 244000,
'disk': 240,
'bandwidth': None
},
'hs1.8xlarge': {
'id': 'hs1.8xlarge',
'name': 'High Storage Eight Extra Large Instance',
'ram': 119808,
'disk': 48000,
'bandwidth': None
}
}
REGION_DETAILS = {
'us-east-1': {
'endpoint': 'ec2.us-east-1.amazonaws.com',
'api_name': 'ec2_us_east',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc1.4xlarge',
'cc2.8xlarge',
'cg1.4xlarge',
'cr1.8xlarge',
'hs1.8xlarge'
]
},
'us-west-1': {
'endpoint': 'ec2.us-west-1.amazonaws.com',
'api_name': 'ec2_us_west',
'country': 'USA',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge'
]
},
'us-west-2': {
'endpoint': 'ec2.us-west-2.amazonaws.com',
'api_name': 'ec2_us_west_oregon',
'country': 'US',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge'
]
},
'eu-west-1': {
'endpoint': 'ec2.eu-west-1.amazonaws.com',
'api_name': 'ec2_eu_west',
'country': 'Ireland',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge',
'cc2.8xlarge'
]
},
'ap-southeast-1': {
'endpoint': 'ec2.ap-southeast-1.amazonaws.com',
'api_name': 'ec2_ap_southeast',
'country': 'Singapore',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge'
]
},
'ap-northeast-1': {
'endpoint': 'ec2.ap-northeast-1.amazonaws.com',
'api_name': 'ec2_ap_northeast',
'country': 'Japan',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge'
]
},
'sa-east-1': {
'endpoint': 'ec2.sa-east-1.amazonaws.com',
'api_name': 'ec2_sa_east',
'country': 'Brazil',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'c1.medium',
'c1.xlarge'
]
},
'ap-southeast-2': {
'endpoint': 'ec2.ap-southeast-2.amazonaws.com',
'api_name': 'ec2_ap_southeast_2',
'country': 'Australia',
'instance_types': [
't1.micro',
'm1.small',
'm1.medium',
'm1.large',
'm1.xlarge',
'm2.xlarge',
'm2.2xlarge',
'm2.4xlarge',
'm3.xlarge',
'm3.2xlarge',
'c1.medium',
'c1.xlarge'
]
},
'nimbus': {
# Nimbus clouds have 3 EC2-style instance types but their particular
# RAM allocations are configured by the admin
'country': 'custom',
'instance_types': [
'm1.small',
'm1.large',
'm1.xlarge'
]
}
}
VALID_EC2_DATACENTERS = REGION_DETAILS.keys()
VALID_EC2_DATACENTERS = [d for d in VALID_EC2_DATACENTERS if d != 'nimbus']
class EC2NodeLocation(NodeLocation):
def __init__(self, id, name, country, driver, availability_zone):
super(EC2NodeLocation, self).__init__(id, name, country, driver)
self.availability_zone = availability_zone
def __repr__(self):
return (('<EC2NodeLocation: id=%s, name=%s, country=%s, '
'availability_zone=%s driver=%s>')
% (self.id, self.name, self.country,
self.availability_zone, self.driver.name))
class EC2Response(AWSBaseResponse):
"""
EC2 specific response parsing and error handling.
"""
def parse_error(self):
err_list = []
# Okay, so for Eucalyptus, you can get a 403, with no body,
# if you are using the wrong user/password.
msg = "Failure: 403 Forbidden"
if self.status == 403 and self.body[:len(msg)] == msg:
raise InvalidCredsError(msg)
try:
body = ET.XML(self.body)
except:
raise MalformedResponseError("Failed to parse XML",
body=self.body, driver=EC2NodeDriver)
for err in body.findall('Errors/Error'):
code, message = err.getchildren()
err_list.append("%s: %s" % (code.text, message.text))
if code.text == "InvalidClientTokenId":
raise InvalidCredsError(err_list[-1])
if code.text == "SignatureDoesNotMatch":
raise InvalidCredsError(err_list[-1])
if code.text == "AuthFailure":
raise InvalidCredsError(err_list[-1])
if code.text == "OptInRequired":
raise InvalidCredsError(err_list[-1])
if code.text == "IdempotentParameterMismatch":
raise IdempotentParamError(err_list[-1])
return "\n".join(err_list)
class EC2Connection(SignedAWSConnection):
"""
Represents a single connection to the EC2 Endpoint.
"""
version = API_VERSION
host = REGION_DETAILS['us-east-1']['endpoint']
responseCls = EC2Response
class ExEC2AvailabilityZone(object):
"""
Extension class which stores information about an EC2 availability zone.
Note: This class is EC2 specific.
"""
def __init__(self, name, zone_state, region_name):
self.name = name
self.zone_state = zone_state
self.region_name = region_name
def __repr__(self):
return (('<ExEC2AvailabilityZone: name=%s, zone_state=%s, '
'region_name=%s>')
% (self.name, self.zone_state, self.region_name))
class BaseEC2NodeDriver(NodeDriver):
"""
Base Amazon EC2 node driver.
Used for main EC2 and other derivate driver classes to inherit from it.
"""
connectionCls = EC2Connection
path = '/'
features = {'create_node': ['ssh_key']}
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED
}
def _pathlist(self, key, arr):
"""
Converts a key and an array of values into AWS query param format.
"""
params = {}
i = 0
for value in arr:
i += 1
params["%s.%s" % (key, i)] = value
return params
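    # Illustrative sketch (added comment, not part of the original source):
    # _pathlist expands a key/list pair into the numbered query parameters
    # that the EC2 API expects, e.g.
    #
    #   self._pathlist('InstanceId', ['i-1234', 'i-5678'])
    #   # -> {'InstanceId.1': 'i-1234', 'InstanceId.2': 'i-5678'}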
def _get_boolean(self, element):
tag = "{%s}%s" % (NAMESPACE, 'return')
return element.findtext(tag) == 'true'
def _get_state_boolean(self, element):
"""
        Checks whether the instance's state is transitional
        (stopping, pending, or starting).
"""
state = findall(element=element,
xpath='instancesSet/item/currentState/name',
namespace=NAMESPACE)[0].text
return state in ('stopping', 'pending', 'starting')
def _get_terminate_boolean(self, element):
status = element.findtext(".//{%s}%s" % (NAMESPACE, 'name'))
return any([term_status == status
for term_status
in ('shutting-down', 'terminated')])
def _to_nodes(self, object, xpath, groups=None):
return [self._to_node(el, groups=groups)
for el in object.findall(fixxpath(xpath=xpath,
namespace=NAMESPACE))]
def _to_node(self, element, groups=None):
try:
state = self.NODE_STATE_MAP[findattr(element=element,
xpath="instanceState/name",
namespace=NAMESPACE)
]
except KeyError:
state = NodeState.UNKNOWN
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
tags = dict((findtext(element=item, xpath='key', namespace=NAMESPACE),
findtext(element=item, xpath='value',
namespace=NAMESPACE))
for item in findall(element=element,
xpath='tagSet/item',
namespace=NAMESPACE)
)
name = tags.get('Name', instance_id)
public_ip = findtext(element=element, xpath='ipAddress',
namespace=NAMESPACE)
public_ips = [public_ip] if public_ip else []
private_ip = findtext(element=element, xpath='privateIpAddress',
namespace=NAMESPACE)
private_ips = [private_ip] if private_ip else []
n = Node(
id=findtext(element=element, xpath='instanceId',
namespace=NAMESPACE),
name=name,
state=state,
public_ips=public_ips,
private_ips=private_ips,
driver=self.connection.driver,
extra={
'dns_name': findattr(element=element, xpath="dnsName",
namespace=NAMESPACE),
'instanceId': findattr(element=element, xpath="instanceId",
namespace=NAMESPACE),
'imageId': findattr(element=element, xpath="imageId",
namespace=NAMESPACE),
'private_dns': findattr(element=element,
xpath="privateDnsName",
namespace=NAMESPACE),
'status': findattr(element=element, xpath="instanceState/name",
namespace=NAMESPACE),
'keyname': findattr(element=element, xpath="keyName",
namespace=NAMESPACE),
'launchindex': findattr(element=element,
xpath="amiLaunchIndex",
namespace=NAMESPACE),
'productcode': [
p.text for p in findall(
element=element,
xpath="productCodesSet/item/productCode",
namespace=NAMESPACE
)],
'instancetype': findattr(element=element, xpath="instanceType",
namespace=NAMESPACE),
'launchdatetime': findattr(element=element, xpath="launchTime",
namespace=NAMESPACE),
'availability': findattr(element,
xpath="placement/availabilityZone",
namespace=NAMESPACE),
'kernelid': findattr(element=element, xpath="kernelId",
namespace=NAMESPACE),
'ramdiskid': findattr(element=element, xpath="ramdiskId",
namespace=NAMESPACE),
'clienttoken': findattr(element=element, xpath="clientToken",
namespace=NAMESPACE),
'groups': groups,
'tags': tags
}
)
return n
def _to_images(self, object):
return [self._to_image(el) for el in object.findall(
fixxpath(xpath='imagesSet/item', namespace=NAMESPACE))
]
def _to_image(self, element):
n = NodeImage(
id=findtext(element=element, xpath='imageId', namespace=NAMESPACE),
name=findtext(element=element, xpath='imageLocation',
namespace=NAMESPACE),
driver=self.connection.driver,
extra={
'state': findattr(element=element, xpath="imageState",
namespace=NAMESPACE),
'ownerid': findattr(element=element, xpath="imageOwnerId",
namespace=NAMESPACE),
'owneralias': findattr(element=element,
xpath="imageOwnerAlias",
namespace=NAMESPACE),
'ispublic': findattr(element=element,
xpath="isPublic",
namespace=NAMESPACE),
'architecture': findattr(element=element,
xpath="architecture",
namespace=NAMESPACE),
'imagetype': findattr(element=element,
xpath="imageType",
namespace=NAMESPACE),
'platform': findattr(element=element,
xpath="platform",
namespace=NAMESPACE),
'rootdevicetype': findattr(element=element,
xpath="rootDeviceType",
namespace=NAMESPACE),
'virtualizationtype': findattr(
element=element, xpath="virtualizationType",
namespace=NAMESPACE),
'hypervisor': findattr(element=element,
xpath="hypervisor",
namespace=NAMESPACE)
}
)
return n
def _to_volume(self, element, name):
volId = findtext(element=element, xpath='volumeId',
namespace=NAMESPACE)
size = findtext(element=element, xpath='size', namespace=NAMESPACE)
return StorageVolume(id=volId,
name=name,
size=int(size),
driver=self)
def list_nodes(self, ex_node_ids=None):
"""
List all nodes
Ex_node_ids parameter is used to filter the list of
nodes that should be returned. Only the nodes
with the corresponding node ids will be returned.
@param ex_node_ids: List of C{node.id}
@type ex_node_ids: C{list} of C{str}
@rtype: C{list} of L{Node}
"""
params = {'Action': 'DescribeInstances'}
if ex_node_ids:
params.update(self._pathlist('InstanceId', ex_node_ids))
elem = self.connection.request(self.path, params=params).object
nodes = []
for rs in findall(element=elem, xpath='reservationSet/item',
namespace=NAMESPACE):
groups = [g.findtext('')
for g in findall(element=rs,
xpath='groupSet/item/groupId',
namespace=NAMESPACE)]
nodes += self._to_nodes(rs, 'instancesSet/item', groups)
nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes)
for node in nodes:
ips = nodes_elastic_ips_mappings[node.id]
node.public_ips.extend(ips)
return nodes
def list_sizes(self, location=None):
available_types = REGION_DETAILS[self.region_name]['instance_types']
sizes = []
for instance_type in available_types:
attributes = INSTANCE_TYPES[instance_type]
attributes = copy.deepcopy(attributes)
price = self._get_size_price(size_id=instance_type)
attributes.update({'price': price})
sizes.append(NodeSize(driver=self, **attributes))
return sizes
def list_images(self, location=None, ex_image_ids=None):
"""
List all images
Ex_image_ids parameter is used to filter the list of
images that should be returned. Only the images
with the corresponding image ids will be returned.
@param ex_image_ids: List of C{NodeImage.id}
@type ex_image_ids: C{list} of C{str}
@rtype: C{list} of L{NodeImage}
"""
params = {'Action': 'DescribeImages'}
if ex_image_ids:
params.update(self._pathlist('ImageId', ex_image_ids))
images = self._to_images(
self.connection.request(self.path, params=params).object
)
return images
def list_locations(self):
locations = []
for index, availability_zone in \
enumerate(self.ex_list_availability_zones()):
locations.append(EC2NodeLocation(
index, availability_zone.name, self.country, self,
availability_zone)
)
return locations
def create_volume(self, size, name, location=None, snapshot=None):
params = {
'Action': 'CreateVolume',
'Size': str(size)}
if location is not None:
params['AvailabilityZone'] = location.availability_zone.name
volume = self._to_volume(
self.connection.request(self.path, params=params).object,
name=name)
self.ex_create_tags(volume, {'Name': name})
return volume
def destroy_volume(self, volume):
params = {
'Action': 'DeleteVolume',
'VolumeId': volume.id}
response = self.connection.request(self.path, params=params).object
return self._get_boolean(response)
def attach_volume(self, node, volume, device):
params = {
'Action': 'AttachVolume',
'VolumeId': volume.id,
'InstanceId': node.id,
'Device': device}
self.connection.request(self.path, params=params)
return True
def detach_volume(self, volume):
params = {
'Action': 'DetachVolume',
'VolumeId': volume.id}
self.connection.request(self.path, params=params)
return True
def ex_create_keypair(self, name):
"""Creates a new keypair
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the keypair to Create. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
@type name: C{str}
@rtype: C{dict}
"""
params = {
'Action': 'CreateKeyPair',
'KeyName': name,
}
response = self.connection.request(self.path, params=params).object
key_material = findtext(element=response, xpath='keyMaterial',
namespace=NAMESPACE)
key_fingerprint = findtext(element=response, xpath='keyFingerprint',
namespace=NAMESPACE)
return {
'keyMaterial': key_material,
'keyFingerprint': key_fingerprint,
}
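    # Hedged usage sketch (added; the key name is illustrative): create a
    # keypair and keep the returned private key material, which EC2 only
    # returns once.
    #
    #   keypair = driver.ex_create_keypair(name='example-key')
    #   # keypair['keyMaterial']    -> PEM-encoded private key
    #   # keypair['keyFingerprint'] -> fingerprint string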
def ex_import_keypair(self, name, keyfile):
"""
        Imports a new public key
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the public key to import. This must be
unique, otherwise an InvalidKeyPair.Duplicate exception is raised.
@type name: C{str}
@param keyfile: The filename with path of the public key to import.
@type keyfile: C{str}
@rtype: C{dict}
"""
with open(os.path.expanduser(keyfile)) as fh:
content = fh.read()
base64key = base64.b64encode(content)
params = {
'Action': 'ImportKeyPair',
'KeyName': name,
'PublicKeyMaterial': base64key
}
response = self.connection.request(self.path, params=params).object
key_name = findtext(element=response, xpath='keyName',
namespace=NAMESPACE)
key_fingerprint = findtext(element=response, xpath='keyFingerprint',
namespace=NAMESPACE)
return {
'keyName': key_name,
'keyFingerprint': key_fingerprint,
}
def ex_describe_all_keypairs(self):
"""
Describes all keypairs.
@note: This is a non-standard extension API, and only works for EC2.
@rtype: C{list} of C{str}
"""
params = {
'Action': 'DescribeKeyPairs'
}
response = self.connection.request(self.path, params=params).object
names = []
for elem in findall(element=response, xpath='keySet/item',
namespace=NAMESPACE):
name = findtext(element=elem, xpath='keyName', namespace=NAMESPACE)
names.append(name)
return names
def ex_describe_keypairs(self, name):
"""Describes a keypair by name
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the keypair to describe.
@type name: C{str}
@rtype: C{dict}
"""
params = {
'Action': 'DescribeKeyPairs',
'KeyName.1': name
}
response = self.connection.request(self.path, params=params).object
key_name = findattr(element=response, xpath='keySet/item/keyName',
namespace=NAMESPACE)
return {
'keyName': key_name
}
def ex_list_security_groups(self):
"""
List existing Security Groups.
@note: This is a non-standard extension API, and only works for EC2.
@rtype: C{list} of C{str}
"""
params = {'Action': 'DescribeSecurityGroups'}
response = self.connection.request(self.path, params=params).object
groups = []
for group in findall(element=response, xpath='securityGroupInfo/item',
namespace=NAMESPACE):
name = findtext(element=group, xpath='groupName',
namespace=NAMESPACE)
groups.append(name)
return groups
def ex_create_security_group(self, name, description):
"""
Creates a new Security Group
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the security group to Create.
This must be unique.
@type name: C{str}
@param description: Human readable description of a Security
Group.
@type description: C{str}
@rtype: C{str}
"""
params = {'Action': 'CreateSecurityGroup',
'GroupName': name,
'GroupDescription': description}
return self.connection.request(self.path, params=params).object
def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip,
protocol='tcp'):
"""
Edit a Security Group to allow specific traffic.
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the security group to edit
@type name: C{str}
@param from_port: The beginning of the port range to open
@type from_port: C{str}
@param to_port: The end of the port range to open
@type to_port: C{str}
@param cidr_ip: The ip to allow traffic for.
@type cidr_ip: C{str}
@param protocol: tcp/udp/icmp
@type protocol: C{str}
@rtype: C{bool}
"""
params = {'Action': 'AuthorizeSecurityGroupIngress',
'GroupName': name,
'IpProtocol': protocol,
'FromPort': str(from_port),
'ToPort': str(to_port),
'CidrIp': cidr_ip}
try:
resp = self.connection.request(
self.path, params=params.copy()).object
return bool(findtext(element=resp, xpath='return',
namespace=NAMESPACE))
except Exception:
e = sys.exc_info()[1]
if e.args[0].find('InvalidPermission.Duplicate') == -1:
raise e
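    # Hedged usage sketch (added; group name and CIDR are illustrative):
    # open SSH from a single address range on an existing security group.
    #
    #   driver.ex_create_security_group('example-group', 'demo group')
    #   driver.ex_authorize_security_group('example-group', 22, 22,
    #                                      '203.0.113.0/24', protocol='tcp')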
def ex_authorize_security_group_permissive(self, name):
"""
Edit a Security Group to allow all traffic.
@note: This is a non-standard extension API, and only works for EC2.
@param name: The name of the security group to edit
@type name: C{str}
@rtype: C{list} of C{str}
"""
results = []
params = {'Action': 'AuthorizeSecurityGroupIngress',
'GroupName': name,
'IpProtocol': 'tcp',
'FromPort': '0',
'ToPort': '65535',
'CidrIp': '0.0.0.0/0'}
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params['IpProtocol'] = 'udp'
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
params.update({'IpProtocol': 'icmp', 'FromPort': '-1', 'ToPort': '-1'})
try:
results.append(
self.connection.request(self.path, params=params.copy()).object
)
except Exception:
e = sys.exc_info()[1]
if e.args[0].find("InvalidPermission.Duplicate") == -1:
raise e
return results
def ex_list_availability_zones(self, only_available=True):
"""
Return a list of L{ExEC2AvailabilityZone} objects for the
current region.
Note: This is an extension method and is only available for EC2
driver.
@keyword only_available: If true, return only availability zones
with state 'available'
@type only_available: C{str}
@rtype: C{list} of L{ExEC2AvailabilityZone}
"""
params = {'Action': 'DescribeAvailabilityZones'}
if only_available:
params.update({'Filter.0.Name': 'state'})
params.update({'Filter.0.Value.0': 'available'})
params.update({'Filter.1.Name': 'region-name'})
params.update({'Filter.1.Value.0': self.region_name})
result = self.connection.request(self.path,
params=params.copy()).object
availability_zones = []
for element in findall(element=result,
xpath='availabilityZoneInfo/item',
namespace=NAMESPACE):
name = findtext(element=element, xpath='zoneName',
namespace=NAMESPACE)
zone_state = findtext(element=element, xpath='zoneState',
namespace=NAMESPACE)
region_name = findtext(element=element, xpath='regionName',
namespace=NAMESPACE)
availability_zone = ExEC2AvailabilityZone(
name=name,
zone_state=zone_state,
region_name=region_name
)
availability_zones.append(availability_zone)
return availability_zones
def ex_describe_tags(self, resource):
"""
Return a dictionary of tags for a resource (Node or StorageVolume).
@param resource: resource which should be used
@type resource: L{Node} or L{StorageVolume}
@return: dict Node tags
@rtype: C{dict}
"""
params = {'Action': 'DescribeTags',
'Filter.0.Name': 'resource-id',
'Filter.0.Value.0': resource.id,
'Filter.1.Name': 'resource-type',
'Filter.1.Value.0': 'instance',
}
result = self.connection.request(self.path,
params=params.copy()).object
tags = {}
for element in findall(element=result, xpath='tagSet/item',
namespace=NAMESPACE):
key = findtext(element=element, xpath='key', namespace=NAMESPACE)
value = findtext(element=element,
xpath='value', namespace=NAMESPACE)
tags[key] = value
return tags
def ex_create_tags(self, resource, tags):
"""
Create tags for a resource (Node or StorageVolume).
@param resource: Resource to be tagged
@type resource: L{Node} or L{StorageVolume}
@param tags: A dictionary or other mapping of strings to strings,
associating tag names with tag values.
@type tags: C{dict}
@rtype: C{bool}
"""
if not tags:
return
params = {'Action': 'CreateTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
params['Tag.%d.Value' % i] = tags[key]
result = self.connection.request(self.path,
params=params.copy()).object
element = findtext(element=result, xpath='return',
namespace=NAMESPACE)
return element == 'true'
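    # Hedged usage sketch (added; tag values are illustrative): tag an
    # existing node, then read the tags back.
    #
    #   driver.ex_create_tags(node, {'Name': 'web-1', 'env': 'staging'})
    #   tags = driver.ex_describe_tags(node)   # -> {'Name': 'web-1', ...}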
def ex_delete_tags(self, resource, tags):
"""
Delete tags from a resource.
@param resource: Resource to be tagged
@type resource: L{Node} or L{StorageVolume}
@param tags: A dictionary or other mapping of strings to strings,
specifying the tag names and tag values to be deleted.
@type tags: C{dict}
@rtype: C{bool}
"""
if not tags:
return
params = {'Action': 'DeleteTags',
'ResourceId.0': resource.id}
for i, key in enumerate(tags):
params['Tag.%d.Key' % i] = key
params['Tag.%d.Value' % i] = tags[key]
result = self.connection.request(self.path,
params=params.copy()).object
element = findtext(element=result, xpath='return',
namespace=NAMESPACE)
return element == 'true'
def _add_instance_filter(self, params, node):
"""
Add instance filter to the provided params dictionary.
"""
params.update({
'Filter.0.Name': 'instance-id',
'Filter.0.Value.0': node.id
})
def ex_describe_all_addresses(self, only_allocated=False):
"""
Return all the Elastic IP addresses for this account
optionally, return only the allocated addresses
@param only_allocated: If true, return only those addresses
that are associated with an instance
@type only_allocated: C{str}
@return: list list of elastic ips for this particular account.
@rtype: C{list} of C{str}
"""
params = {'Action': 'DescribeAddresses'}
result = self.connection.request(self.path,
params=params.copy()).object
# the list which we return
elastic_ip_addresses = []
for element in findall(element=result, xpath='addressesSet/item',
namespace=NAMESPACE):
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
# if only allocated addresses are requested
if only_allocated and not instance_id:
continue
ip_address = findtext(element=element, xpath='publicIp',
namespace=NAMESPACE)
elastic_ip_addresses.append(ip_address)
return elastic_ip_addresses
def ex_associate_addresses(self, node, elastic_ip_address):
"""
Associate an IP address with a particular node.
@param node: Node instance
@type node: L{Node}
@param elastic_ip_address: IP address which should be used
@type elastic_ip_address: C{str}
@rtype: C{bool}
"""
params = {'Action': 'AssociateAddress'}
params.update(self._pathlist('InstanceId', [node.id]))
params.update({'PublicIp': elastic_ip_address})
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_describe_addresses(self, nodes):
"""
Return Elastic IP addresses for all the nodes in the provided list.
@param nodes: List of C{Node} instances
@type nodes: C{list} of L{Node}
@return: Dictionary where a key is a node ID and the value is a
list with the Elastic IP addresses associated with this node.
@rtype: C{dict}
"""
if not nodes:
return {}
params = {'Action': 'DescribeAddresses'}
if len(nodes) == 1:
self._add_instance_filter(params, nodes[0])
result = self.connection.request(self.path,
params=params.copy()).object
node_instance_ids = [node.id for node in nodes]
nodes_elastic_ip_mappings = {}
for node_id in node_instance_ids:
nodes_elastic_ip_mappings.setdefault(node_id, [])
for element in findall(element=result, xpath='addressesSet/item',
namespace=NAMESPACE):
instance_id = findtext(element=element, xpath='instanceId',
namespace=NAMESPACE)
ip_address = findtext(element=element, xpath='publicIp',
namespace=NAMESPACE)
if instance_id not in node_instance_ids:
continue
nodes_elastic_ip_mappings[instance_id].append(ip_address)
return nodes_elastic_ip_mappings
def ex_describe_addresses_for_node(self, node):
"""
Return a list of Elastic IP addresses associated with this node.
@param node: Node instance
@type node: L{Node}
@return: list Elastic IP addresses attached to this node.
@rtype: C{list} of C{str}
"""
node_elastic_ips = self.ex_describe_addresses([node])
return node_elastic_ips[node.id]
def ex_modify_instance_attribute(self, node, attributes):
"""
Modify node attributes.
A list of valid attributes can be found at http://goo.gl/gxcj8
@param node: Node instance
@type node: L{Node}
@param attributes: Dictionary with node attributes
@type attributes: C{dict}
@return: True on success, False otherwise.
@rtype: C{bool}
"""
attributes = attributes or {}
attributes.update({'InstanceId': node.id})
params = {'Action': 'ModifyInstanceAttribute'}
params.update(attributes)
result = self.connection.request(self.path,
params=params.copy()).object
element = findtext(element=result, xpath='return',
namespace=NAMESPACE)
return element == 'true'
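    # Hedged usage sketch (added): attributes are passed through as raw EC2
    # query parameters, e.g. changing the instance type of a stopped node:
    #
    #   driver.ex_modify_instance_attribute(node,
    #                                       {'InstanceType.Value': 'm1.large'})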
def ex_change_node_size(self, node, new_size):
"""
Change the node size.
        Note: The node must be turned off before changing the size.
@param node: Node instance
@type node: L{Node}
        @param new_size: NodeSize instance
@type new_size: L{NodeSize}
@return: True on success, False otherwise.
@rtype: C{bool}
"""
if 'instancetype' in node.extra:
current_instance_type = node.extra['instancetype']
if current_instance_type == new_size.id:
                raise ValueError('New instance size is the same as '
                                 'the current one')
attributes = {'InstanceType.Value': new_size.id}
return self.ex_modify_instance_attribute(node, attributes)
def create_node(self, **kwargs):
"""Create a new EC2 node
Reference: http://bit.ly/8ZyPSy [docs.amazonwebservices.com]
@inherits: L{NodeDriver.create_node}
@keyword ex_mincount: Minimum number of instances to launch
@type ex_mincount: C{int}
@keyword ex_maxcount: Maximum number of instances to launch
@type ex_maxcount: C{int}
@keyword ex_securitygroup: Name of security group
@type ex_securitygroup: C{str}
@keyword ex_keyname: The name of the key pair
@type ex_keyname: C{str}
@keyword ex_userdata: User data
@type ex_userdata: C{str}
@keyword ex_clienttoken: Unique identifier to ensure idempotency
@type ex_clienttoken: C{str}
@keyword ex_blockdevicemappings: C{list} of C{dict} block device
mappings. Example:
[{'DeviceName': '/dev/sdb', 'VirtualName': 'ephemeral0'}]
@type ex_blockdevicemappings: C{list} of C{dict}
"""
image = kwargs["image"]
size = kwargs["size"]
params = {
'Action': 'RunInstances',
'ImageId': image.id,
'MinCount': kwargs.get('ex_mincount', '1'),
'MaxCount': kwargs.get('ex_maxcount', '1'),
'InstanceType': size.id
}
if 'ex_securitygroup' in kwargs:
if not isinstance(kwargs['ex_securitygroup'], list):
kwargs['ex_securitygroup'] = [kwargs['ex_securitygroup']]
for sig in range(len(kwargs['ex_securitygroup'])):
params['SecurityGroup.%d' % (sig + 1,)] =\
kwargs['ex_securitygroup'][sig]
if 'location' in kwargs:
availability_zone = getattr(kwargs['location'],
'availability_zone', None)
if availability_zone:
if availability_zone.region_name != self.region_name:
raise AttributeError('Invalid availability zone: %s'
% (availability_zone.name))
params['Placement.AvailabilityZone'] = availability_zone.name
if 'ex_keyname' in kwargs:
params['KeyName'] = kwargs['ex_keyname']
if 'ex_userdata' in kwargs:
params['UserData'] = base64.b64encode(b(kwargs['ex_userdata']))\
.decode('utf-8')
if 'ex_clienttoken' in kwargs:
params['ClientToken'] = kwargs['ex_clienttoken']
if 'ex_blockdevicemappings' in kwargs:
for index, mapping in enumerate(kwargs['ex_blockdevicemappings']):
params['BlockDeviceMapping.%d.DeviceName' % (index + 1)] = \
mapping['DeviceName']
params['BlockDeviceMapping.%d.VirtualName' % (index + 1)] = \
mapping['VirtualName']
object = self.connection.request(self.path, params=params).object
nodes = self._to_nodes(object, 'instancesSet/item')
for node in nodes:
tags = {'Name': kwargs['name']}
try:
self.ex_create_tags(resource=node, tags=tags)
except Exception:
continue
node.name = kwargs['name']
node.extra.update({'tags': tags})
if len(nodes) == 1:
return nodes[0]
else:
return nodes
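    # Hedged usage sketch (added; the AMI id, key and group names are
    # illustrative placeholders):
    #
    #   image = driver.list_images(ex_image_ids=['ami-xxxxxxxx'])[0]
    #   size = [s for s in driver.list_sizes() if s.id == 'm1.small'][0]
    #   node = driver.create_node(name='example-node', image=image, size=size,
    #                             ex_keyname='example-key',
    #                             ex_securitygroup='example-group')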
def reboot_node(self, node):
params = {'Action': 'RebootInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_boolean(res)
def ex_start_node(self, node):
"""
Start the node by passing in the node object, does not work with
instance store backed instances
@param node: Node which should be used
@type node: L{Node}
@rtype: C{bool}
"""
params = {'Action': 'StartInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_state_boolean(res)
def ex_stop_node(self, node):
"""
Stop the node by passing in the node object, does not work with
instance store backed instances
@param node: Node which should be used
@type node: L{Node}
@rtype: C{bool}
"""
params = {'Action': 'StopInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_state_boolean(res)
def destroy_node(self, node):
params = {'Action': 'TerminateInstances'}
params.update(self._pathlist('InstanceId', [node.id]))
res = self.connection.request(self.path, params=params).object
return self._get_terminate_boolean(res)
class EC2NodeDriver(BaseEC2NodeDriver):
"""
Amazon EC2 node driver.
"""
connectionCls = EC2Connection
type = Provider.EC2
name = 'Amazon EC2'
website = 'http://aws.amazon.com/ec2/'
path = '/'
region_name = 'us-east-1'
country = 'USA'
api_name = 'ec2_us_east'
features = {'create_node': ['ssh_key']}
NODE_STATE_MAP = {
'pending': NodeState.PENDING,
'running': NodeState.RUNNING,
'shutting-down': NodeState.UNKNOWN,
'terminated': NodeState.TERMINATED
}
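# Hedged usage sketch (added; credentials are placeholders): drivers are
# normally obtained through libcloud's provider factory rather than
# instantiated directly, e.g.
#
#   from libcloud.compute.providers import get_driver
#   from libcloud.compute.types import Provider
#   driver = get_driver(Provider.EC2)('ACCESS_KEY_ID', 'SECRET_KEY')
#   nodes = driver.list_nodes()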
class IdempotentParamError(LibcloudError):
"""
Request used the same client token as a previous,
but non-identical request.
"""
def __str__(self):
return repr(self.value)
class EC2EUConnection(EC2Connection):
"""
Connection class for EC2 in the Western Europe Region
"""
host = REGION_DETAILS['eu-west-1']['endpoint']
class EC2EUNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Western Europe Region.
"""
api_name = 'ec2_eu_west'
name = 'Amazon EC2 (eu-west-1)'
friendly_name = 'Amazon Europe Ireland'
country = 'IE'
region_name = 'eu-west-1'
connectionCls = EC2EUConnection
class EC2USWestConnection(EC2Connection):
"""
Connection class for EC2 in the Western US Region
"""
host = REGION_DETAILS['us-west-1']['endpoint']
class EC2USWestNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Western US Region
"""
api_name = 'ec2_us_west'
name = 'Amazon EC2 (us-west-1)'
friendly_name = 'Amazon US N. California'
country = 'US'
region_name = 'us-west-1'
connectionCls = EC2USWestConnection
class EC2USWestOregonConnection(EC2Connection):
"""
Connection class for EC2 in the Western US Region (Oregon).
"""
host = REGION_DETAILS['us-west-2']['endpoint']
class EC2USWestOregonNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the US West Oregon region.
"""
api_name = 'ec2_us_west_oregon'
name = 'Amazon EC2 (us-west-2)'
friendly_name = 'Amazon US West - Oregon'
country = 'US'
region_name = 'us-west-2'
connectionCls = EC2USWestOregonConnection
class EC2APSEConnection(EC2Connection):
"""
Connection class for EC2 in the Southeast Asia Pacific Region.
"""
host = REGION_DETAILS['ap-southeast-1']['endpoint']
class EC2APNEConnection(EC2Connection):
"""
Connection class for EC2 in the Northeast Asia Pacific Region.
"""
host = REGION_DETAILS['ap-northeast-1']['endpoint']
class EC2APSENodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Southeast Asia Pacific Region.
"""
api_name = 'ec2_ap_southeast'
name = 'Amazon EC2 (ap-southeast-1)'
friendly_name = 'Amazon Asia-Pacific Singapore'
country = 'SG'
region_name = 'ap-southeast-1'
connectionCls = EC2APSEConnection
class EC2APNENodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Northeast Asia Pacific Region.
"""
api_name = 'ec2_ap_northeast'
name = 'Amazon EC2 (ap-northeast-1)'
friendly_name = 'Amazon Asia-Pacific Tokyo'
country = 'JP'
region_name = 'ap-northeast-1'
connectionCls = EC2APNEConnection
class EC2SAEastConnection(EC2Connection):
"""
Connection class for EC2 in the South America (Sao Paulo) Region.
"""
host = REGION_DETAILS['sa-east-1']['endpoint']
class EC2SAEastNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the South America (Sao Paulo) Region.
"""
api_name = 'ec2_sa_east'
name = 'Amazon EC2 (sa-east-1)'
friendly_name = 'Amazon South America Sao Paulo'
country = 'BR'
region_name = 'sa-east-1'
connectionCls = EC2SAEastConnection
class EC2APSESydneyConnection(EC2Connection):
"""
Connection class for EC2 in the Southeast Asia Pacific (Sydney) Region.
"""
host = REGION_DETAILS['ap-southeast-2']['endpoint']
class EC2APSESydneyNodeDriver(EC2NodeDriver):
"""
Driver class for EC2 in the Southeast Asia Pacific (Sydney) Region.
"""
api_name = 'ec2_ap_southeast_2'
name = 'Amazon EC2 (ap-southeast-2)'
friendly_name = 'Amazon Asia-Pacific Sydney'
country = 'AU'
region_name = 'ap-southeast-2'
connectionCls = EC2APSESydneyConnection
class EucConnection(EC2Connection):
"""
Connection class for Eucalyptus
"""
host = None
class EucNodeDriver(BaseEC2NodeDriver):
"""
Driver class for Eucalyptus
"""
name = 'Eucalyptus'
website = 'http://www.eucalyptus.com/'
api_name = 'ec2_us_east'
region_name = 'us-east-1'
connectionCls = EucConnection
def __init__(self, key, secret=None, secure=True, host=None,
path=None, port=None):
"""
@inherits: L{EC2NodeDriver.__init__}
        @param path: The path on the host where the API can be reached.
@type path: C{str}
"""
super(EucNodeDriver, self).__init__(key, secret, secure, host, port)
if path is None:
path = "/services/Eucalyptus"
self.path = path
def list_locations(self):
raise NotImplementedError(
'list_locations not implemented for this driver')
def _add_instance_filter(self, params, node):
"""
Eucalyptus driver doesn't support filtering on instance id so this is a
no-op.
"""
pass
class NimbusConnection(EC2Connection):
"""
Connection class for Nimbus
"""
host = None
class NimbusNodeDriver(BaseEC2NodeDriver):
"""
Driver class for Nimbus
"""
type = Provider.NIMBUS
name = 'Nimbus'
website = 'http://www.nimbusproject.org/'
country = 'Private'
api_name = 'nimbus'
region_name = 'nimbus'
friendly_name = 'Nimbus Private Cloud'
connectionCls = NimbusConnection
def ex_describe_addresses(self, nodes):
"""
Nimbus doesn't support elastic IPs, so this is a passthrough.
@inherits: L{EC2NodeDriver.ex_describe_addresses}
"""
nodes_elastic_ip_mappings = {}
for node in nodes:
# empty list per node
nodes_elastic_ip_mappings[node.id] = []
return nodes_elastic_ip_mappings
def ex_create_tags(self, resource, tags):
"""
Nimbus doesn't support creating tags, so this is a passthrough.
@inherits: L{EC2NodeDriver.ex_create_tags}
"""
pass
| ema/conpaas | conpaas-services/contrib/libcloud/compute/drivers/ec2.py | Python | bsd-3-clause | 53,777 |
__version__ = "1.12.0"
__version_info__ = ( 1, 12, 0 )
| JeffHoogland/bodhi3packages | python3-efl-i386/usr/lib/python3.4/dist-packages/efl/__init__.py | Python | bsd-3-clause | 56 |
#!/usr/bin/env python
"""Backport from python2.7 to python <= 2.6."""
from itertools import repeat as _repeat, chain as _chain, starmap as _starmap
try:
from itertools import izip_longest as _zip_longest
except ImportError:
from itertools import izip
def _zip_longest(*args, **kwds):
# izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
fillvalue = kwds.get('fillvalue')
def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
yield counter() # yields the fillvalue, or raises IndexError
fillers = _repeat(fillvalue)
iters = [_chain(it, sentinel(), fillers) for it in args]
try:
for tup in izip(*iters):
yield tup
except IndexError:
pass
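# Pure-Python ordered mapping: insertion order is kept in the parallel _keys
# list while the values are stored in the underlying dict.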
class OrderedDict(dict):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
if not hasattr(self, '_keys'):
self._keys = []
self.update(*args, **kwds)
def clear(self):
del self._keys[:]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
self._keys.append(key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __iter__(self):
return iter(self._keys)
def __reversed__(self):
return reversed(self._keys)
def popitem(self):
if not self:
raise KeyError('dictionary is empty')
key = self._keys.pop()
value = dict.pop(self, key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
inst_dict.pop('_keys', None)
return (self.__class__, (items,), inst_dict)
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
def update(self, other=(), **kwds):
if hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def keys(self):
return list(self)
def values(self):
return [self[key] for key in self]
def items(self):
return [(key, self[key]) for key in self]
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return all(p==q for p, q in _zip_longest(self.items(), other.items()))
return dict.__eq__(self, other)
# End class OrderedDict
| lewisodriscoll/sasview | src/sas/sascalc/data_util/ordereddict.py | Python | bsd-3-clause | 3,441 |
#!/usr/bin/env python
'''
Author: Christopher Duffy
Date: February 2015
Name: nmap_scanner.py
Purpose: To scan a network
Copyright (c) 2015, Christopher Duffy All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met: * Redistributions
of source code must retain the above copyright notice, this list of conditions and
the following disclaimer. * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution. * Neither the
name of the nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CHRISTOPHER DUFFY BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import sys
try:
import nmap
except:
sys.exit("[!] Install the nmap library: pip install python-nmap")
# Argument Validator
if len(sys.argv) != 3:
sys.exit("Please provide two arguments the first being the targets the second the ports")
ports = str(sys.argv[2])
addrs = str(sys.argv[1])
scanner = nmap.PortScanner()
scanner.scan(addrs, ports)
for host in scanner.all_hosts():
    hostname = scanner[host].hostname()
    if not hostname:
        print("The host's IP address is %s and its hostname was not found" % (host))
    else:
        print("The host's IP address is %s and its hostname is %s" % (host, hostname))
| liorvh/pythonpentest | nmap_scannner.py | Python | bsd-3-clause | 2,228 |
# Nov 22, 2014
# This patch is to create all the prep/sample template files and link them in
# the database so they are present for download
from os.path import join
from time import strftime
from qiita_db.util import get_mountpoint
from qiita_db.sql_connection import SQLConnectionHandler
from qiita_db.metadata_template import SampleTemplate, PrepTemplate
conn_handler = SQLConnectionHandler()
_id, fp_base = get_mountpoint('templates')[0]
for study_id in conn_handler.execute_fetchall(
"SELECT study_id FROM qiita.study"):
study_id = study_id[0]
if SampleTemplate.exists(study_id):
st = SampleTemplate(study_id)
fp = join(fp_base, '%d_%s.txt' % (study_id, strftime("%Y%m%d-%H%M%S")))
st.to_file(fp)
st.add_filepath(fp)
for prep_template_id in conn_handler.execute_fetchall(
"SELECT prep_template_id FROM qiita.prep_template"):
prep_template_id = prep_template_id[0]
pt = PrepTemplate(prep_template_id)
study_id = pt.study_id
fp = join(fp_base, '%d_prep_%d_%s.txt' % (pt.study_id, prep_template_id,
strftime("%Y%m%d-%H%M%S")))
pt.to_file(fp)
pt.add_filepath(fp)
| RNAer/qiita | qiita_db/support_files/patches/python_patches/6.py | Python | bsd-3-clause | 1,165 |
from __future__ import print_function
import numpy as np
from bokeh.client import push_session
from bokeh.io import curdoc
from bokeh.models import (ColumnDataSource, DataRange1d, Plot, Circle, WidgetBox,
Row, Button, TapTool)
N = 9
x = np.linspace(-2, 2, N)
y = x**2
source1 = ColumnDataSource(dict(x = x, y = y, radius = [0.1]*N))
xdr1 = DataRange1d()
ydr1 = DataRange1d()
plot1 = Plot(x_range=xdr1, y_range=ydr1, plot_width=400, plot_height=400)
plot1.title.text = "Plot1"
plot1.tools.append(TapTool(plot=plot1))
plot1.add_glyph(source1, Circle(x="x", y="y", radius="radius", fill_color="red"))
source2 = ColumnDataSource(dict(x = x, y = y, color = ["blue"]*N))
xdr2 = DataRange1d()
ydr2 = DataRange1d()
plot2 = Plot(x_range=xdr2, y_range=ydr2, plot_width=400, plot_height=400)
plot2.title.text = "Plot2"
plot2.tools.append(TapTool(plot=plot2))
plot2.add_glyph(source2, Circle(x="x", y="y", radius=0.1, fill_color="color"))
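# Selecting points in plot1 recolors the matching points in plot2 (red when selected, blue otherwise).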
def on_selection_change1(attr, _, inds):
color = ["blue"]*N
if inds['1d']['indices']:
indices = inds['1d']['indices']
for i in indices:
color[i] = "red"
source2.data["color"] = color
source1.on_change('selected', on_selection_change1)
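# Selecting a single point in plot2 enlarges the matching point in plot1 by increasing its radius.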
def on_selection_change2(attr, _, inds):
inds = inds['1d']['indices']
if inds:
[index] = inds
radius = [0.1]*N
radius[index] = 0.2
else:
radius = [0.1]*N
source1.data["radius"] = radius
source2.on_change('selected', on_selection_change2)
reset = Button(label="Reset")
def on_reset_click():
source1.selected = {
'0d': {'flag': False, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': {}}
}
source2.selected = {
'0d': {'flag': False, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': {}}
}
reset.on_click(on_reset_click)
widgetBox = WidgetBox(children=[reset], width=150)
row = Row(children=[widgetBox, plot1, plot2])
document = curdoc()
document.add_root(row)
if __name__ == "__main__":
print("\npress ctrl-C to exit")
session = push_session(document)
session.show()
session.loop_until_closed()
| DuCorey/bokeh | examples/models/server/linked_tap.py | Python | bsd-3-clause | 2,181 |
"""
BrowserID support
"""
from social.backends.base import BaseAuth
from social.exceptions import AuthFailed, AuthMissingParameter
class PersonaAuth(BaseAuth):
"""BrowserID authentication backend"""
name = 'persona'
def get_user_id(self, details, response):
"""Use BrowserID email as ID"""
return details['email']
def get_user_details(self, response):
"""Return user details, BrowserID only provides Email."""
# {'status': 'okay',
# 'audience': 'localhost:8000',
# 'expires': 1328983575529,
# 'email': '[email protected]',
# 'issuer': 'browserid.org'}
email = response['email']
return {'username': email.split('@', 1)[0],
'email': email,
'fullname': '',
'first_name': '',
'last_name': ''}
def extra_data(self, user, uid, response, details):
"""Return users extra data"""
return {'audience': response['audience'],
'issuer': response['issuer']}
def auth_complete(self, *args, **kwargs):
"""Completes loging process, must return user instance"""
if not 'assertion' in self.data:
raise AuthMissingParameter(self, 'assertion')
response = self.get_json('https://browserid.org/verify', data={
'assertion': self.data['assertion'],
'audience': self.strategy.request_host()
}, method='POST')
if response.get('status') == 'failure':
raise AuthFailed(self)
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
| nvbn/python-social-auth | social/backends/persona.py | Python | bsd-3-clause | 1,664 |
from sqlalchemy import JSON, Boolean, Column, ForeignKey, Index, Integer, String
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from sqlalchemy.sql.functions import GenericFunction
from qcfractal.storage_sockets.models.sql_base import Base, MsgpackExt
# class json_agg(GenericFunction):
# type = postgresql.JSON
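# Register Postgres' json_build_object() with SQLAlchemy so its return type is treated as JSON.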
class json_build_object(GenericFunction):
type = postgresql.JSON
class CollectionORM(Base):
"""
    A base collection class of precomputed workflows, such as datasets.
This is a dynamic document, so it will accept any number of
extra fields (expandable and uncontrolled schema)
"""
__tablename__ = "collection"
id = Column(Integer, primary_key=True)
collection_type = Column(String) # for inheritance
collection = Column(String(100), nullable=False)
lname = Column(String(100), nullable=False)
name = Column(String(100), nullable=False)
tags = Column(JSON)
tagline = Column(String)
description = Column(String)
group = Column(String(100), nullable=False)
visibility = Column(Boolean, nullable=False)
view_url_hdf5 = Column(String)
view_url_plaintext = Column(String)
view_metadata = Column(JSON)
view_available = Column(Boolean, nullable=False)
provenance = Column(JSON)
extra = Column(JSON) # extra data related to specific collection type
def update_relations(self, **kwarg):
pass
__table_args__ = (
Index("ix_collection_lname", "collection", "lname", unique=True),
Index("ix_collection_type", "collection_type"),
)
__mapper_args__ = {"polymorphic_on": "collection_type"}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class DatasetMixin:
"""
Mixin class for common Dataset attributes.
"""
default_benchmark = Column(String)
default_keywords = Column(JSON)
default_driver = Column(String)
default_units = Column(String)
alias_keywords = Column(JSON)
default_program = Column(String)
history_keys = Column(JSON)
history = Column(JSON)
class ContributedValuesORM(Base):
"""One group of a contibuted values per dataset
Each dataset can have multiple rows in this table"""
__tablename__ = "contributed_values"
collection_id = Column(Integer, ForeignKey("collection.id", ondelete="cascade"), primary_key=True)
name = Column(String, nullable=False, primary_key=True)
values = Column(MsgpackExt, nullable=False)
index = Column(MsgpackExt, nullable=False)
values_structure = Column(JSON, nullable=False)
theory_level = Column(JSON, nullable=False)
units = Column(String, nullable=False)
theory_level_details = Column(JSON)
citations = Column(JSON)
external_url = Column(String)
doi = Column(String)
comments = Column(String)
class DatasetEntryORM(Base):
"""Association table for many to many"""
__tablename__ = "dataset_entry"
dataset_id = Column(Integer, ForeignKey("dataset.id", ondelete="cascade"), primary_key=True)
# TODO: check the cascase_delete with molecule
molecule_id = Column(Integer, ForeignKey("molecule.id"), nullable=False)
name = Column(String, nullable=False, primary_key=True)
comment = Column(String)
local_results = Column(JSON)
class DatasetORM(CollectionORM, DatasetMixin):
"""
The Dataset class for homogeneous computations on many molecules.
"""
__tablename__ = "dataset"
id = Column(Integer, ForeignKey("collection.id", ondelete="CASCADE"), primary_key=True)
contributed_values_obj = relationship(ContributedValuesORM, lazy="selectin", cascade="all, delete-orphan")
records_obj = relationship(
DatasetEntryORM, lazy="selectin", cascade="all, delete-orphan", backref="dataset" # lazy='noload',
)
@hybrid_property
def contributed_values(self):
return self._contributed_values(self.contributed_values_obj)
@staticmethod
def _contributed_values(contributed_values_obj):
if not contributed_values_obj:
return {}
if not isinstance(contributed_values_obj, list):
contributed_values_obj = [contributed_values_obj]
ret = {}
try:
for obj in contributed_values_obj:
ret[obj.name.lower()] = obj.to_dict(exclude=["collection_id"])
except Exception as err:
pass
return ret
@contributed_values.setter
def contributed_values(self, dict_values):
return dict_values
@hybrid_property
def records(self):
"""calculated property when accessed, not saved in the DB
A view of the many to many relation"""
return self._records(self.records_obj)
@staticmethod
def _records(records_obj):
if not records_obj:
return []
if not isinstance(records_obj, list):
records_obj = [records_obj]
ret = []
try:
for rec in records_obj:
ret.append(rec.to_dict(exclude=["dataset_id"]))
except Exception as err:
            # raises an exception on first access
pass
return ret
@records.setter
def records(self, dict_values):
return dict_values
def update_relations(self, records=None, contributed_values=None, **kwarg):
self.records_obj = []
records = [] if not records else records
for rec_dict in records:
rec = DatasetEntryORM(dataset_id=int(self.id), **rec_dict)
self.records_obj.append(rec)
self.contributed_values_obj = []
contributed_values = {} if not contributed_values else contributed_values
for key, rec_dict in contributed_values.items():
rec = ContributedValuesORM(collection_id=int(self.id), **rec_dict)
self.contributed_values_obj.append(rec)
__table_args__ = (
# Index('ix_results_molecule', 'molecule'), # b-tree index
# UniqueConstraint("program", "driver", "method", "basis", "keywords", "molecule", name='uix_results_keys'),
)
__mapper_args__ = {
"polymorphic_identity": "dataset",
# to have separate select when querying CollectionORM
"polymorphic_load": "selectin",
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ReactionDatasetEntryORM(Base):
"""Association table for many to many"""
__tablename__ = "reaction_dataset_entry"
reaction_dataset_id = Column(Integer, ForeignKey("reaction_dataset.id", ondelete="cascade"), primary_key=True)
attributes = Column(JSON)
name = Column(String, nullable=False, primary_key=True)
reaction_results = Column(JSON)
stoichiometry = Column(JSON)
extras = Column(JSON)
class ReactionDatasetORM(CollectionORM, DatasetMixin):
"""
Reaction Dataset
"""
__tablename__ = "reaction_dataset"
id = Column(Integer, ForeignKey("collection.id", ondelete="CASCADE"), primary_key=True)
ds_type = Column(String)
records_obj = relationship(
ReactionDatasetEntryORM, lazy="selectin", cascade="all, delete-orphan", backref="reaction_dataset"
)
contributed_values_obj = relationship(ContributedValuesORM, lazy="selectin", cascade="all, delete-orphan")
@hybrid_property
def contributed_values(self):
return self._contributed_values(self.contributed_values_obj)
@staticmethod
def _contributed_values(contributed_values_obj):
return DatasetORM._contributed_values(contributed_values_obj)
@contributed_values.setter
def contributed_values(self, dict_values):
return dict_values
def update_relations(self, records=None, contributed_values=None, **kwarg):
self.records_obj = []
records = records or []
for rec_dict in records:
rec = ReactionDatasetEntryORM(reaction_dataset_id=int(self.id), **rec_dict)
self.records_obj.append(rec)
self.contributed_values_obj = []
contributed_values = {} if not contributed_values else contributed_values
for key, rec_dict in contributed_values.items():
rec = ContributedValuesORM(collection_id=int(self.id), **rec_dict)
self.contributed_values_obj.append(rec)
@hybrid_property
def records(self):
"""calculated property when accessed, not saved in the DB
A view of the many to many relation"""
return self._records(self.records_obj)
@staticmethod
def _records(records_obj):
if not records_obj:
return []
if not isinstance(records_obj, list):
records_obj = [records_obj]
ret = []
try:
for rec in records_obj:
ret.append(rec.to_dict(exclude=["reaction_dataset_id"]))
except Exception as err:
            # raises an exception on first access
pass
return ret
@records.setter
def records(self, dict_values):
return dict_values
__table_args__ = (
# Index('ix_results_molecule', 'molecule'), # b-tree index
# UniqueConstraint("program", "driver", "method", "basis", "keywords", "molecule", name='uix_results_keys'),
)
__mapper_args__ = {
"polymorphic_identity": "reactiondataset",
# to have separate select when querying CollectionORM
"polymorphic_load": "selectin",
}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| psi4/mongo_qcdb | qcfractal/storage_sockets/models/collections_models.py | Python | bsd-3-clause | 9,556 |
# -*- coding: utf-8 -*-
#
# SymPy documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 22 19:34:32 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
# If your extensions are in another directory, add it here.
sys.path.extend(['../sympy', 'ext'])
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'numpydoc', 'sympylive',]
# Use this to use pngmath instead
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.pngmath', ]
# MathJax file, which is free to use. See http://www.mathjax.org/docs/2.0/start.html
mathjax_path = 'https://c328740.ssl.cf1.rackcdn.com/mathjax/latest/MathJax.js?config=TeX-AMS_HTML-full'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SymPy'
copyright = '2008, 2009, 2010, 2011, 2012 SymPy Development Team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '0.7.2'
# The full version, including alpha/beta/rc tags.
release = '0.7.2-git'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# Translations:
locale_dirs = ["i18n/"]
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
html_logo = '_static/sympylogo.png'
html_favicon = '../logo/SymPy-Favicon.ico'
html_theme_options = {'collapsiblesidebar': True}
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'SymPydoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual], toctree_only).
# toctree_only is set to True so that the start file document itself is not included in the
# output, only the documents referenced by it via TOC trees. The extra stuff in the master
# document is intended to show up in the HTML, but doesn't really belong in the LaTeX output.
latex_documents = [('index', 'sympy-%s.tex' % release, 'SymPy Documentation',
'SymPy Development Team', 'manual', True)]
# Additional stuff for the LaTeX preamble.
# Tweaked to work with XeTeX.
latex_elements = {
'babel': '',
'fontenc': r'''
\usepackage{amssymb}
\usepackage{fontspec}
\defaultfontfeatures{Mapping=tex-text}
\setmainfont{DejaVu Serif}
\setsansfont{DejaVu Sans}
\setmonofont{DejaVu Sans Mono}
''',
'fontpkg': '',
'inputenc': '',
'utf8extra': '',
'preamble': ''
}
# SymPy logo on title page
latex_logo = '_static/sympylogo.png'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Show page numbers next to internal references
latex_show_pagerefs = True
# We use False otherwise the module index gets generated twice.
latex_use_modindex = False
default_role = 'math'
pngmath_divpng_args = ['-gamma 1.5','-D 110']
# Note, this is ignored by the mathjax extension
# Any \newcommand should be defined in the file
pngmath_latex_preamble = '\\usepackage{amsmath}\n'+\
'\\usepackage{bm}\n'+\
'\\usepackage{amsfonts}\n'+\
'\\usepackage{amssymb}\n'+\
'\\setlength{\\parindent}{0pt}\n'
texinfo_documents = [
(master_doc, 'sympy', 'SymPy Documentation',
'SymPy Development Team',
'SymPy', 'Computer algebra system (CAS) in Python', 'Programming',
1),
]
| flacjacket/sympy | doc/src/conf.py | Python | bsd-3-clause | 6,093 |
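# Benchmark: populate a dict with 100000 integer keys; JS_CODE below is the equivalent JavaScript used for comparison.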
d = {}
for i in range(100000):
d[i] = i
JS_CODE = '''
var d = {};
for (var i = 0; i < 100000; i++) {
d[i] = i;
}
'''
| kikocorreoso/brython | www/speed/benchmarks/add_dict.py | Python | bsd-3-clause | 124 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from flask.ext.script import Manager, Shell, Server
from flask.ext.migrate import MigrateCommand
from foobar.app import create_app
from foobar.user.models import User
from foobar.settings import DevConfig, ProdConfig
from foobar.database import db
if os.environ.get("FOOBAR_ENV") == 'prod':
app = create_app(ProdConfig)
else:
app = create_app(DevConfig)
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
manager = Manager(app)
def _make_context():
"""Return context dict for a shell session so you can access
app, db, and the User model by default.
"""
return {'app': app, 'db': db, 'User': User}
@manager.command
def test():
"""Run the tests."""
import pytest
exit_code = pytest.main([TEST_PATH, '--verbose'])
return exit_code
manager.add_command('server', Server())
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| ghofranehr/foobar | manage.py | Python | bsd-3-clause | 1,092 |
import git
from git.exc import InvalidGitRepositoryError
from git.config import GitConfigParser
from io import BytesIO
import weakref
# typing -----------------------------------------------------------------------
from typing import Any, Sequence, TYPE_CHECKING, Union
from git.types import PathLike
if TYPE_CHECKING:
from .base import Submodule
from weakref import ReferenceType
from git.repo import Repo
from git.refs import Head
from git import Remote
from git.refs import RemoteReference
__all__ = ('sm_section', 'sm_name', 'mkhead', 'find_first_remote_branch',
'SubmoduleConfigParser')
#{ Utilities
def sm_section(name: str) -> str:
""":return: section title used in .gitmodules configuration file"""
return f'submodule "{name}"'
def sm_name(section: str) -> str:
""":return: name of the submodule as parsed from the section name"""
section = section.strip()
return section[11:-1]
def mkhead(repo: 'Repo', path: PathLike) -> 'Head':
""":return: New branch/head instance"""
return git.Head(repo, git.Head.to_full_path(path))
def find_first_remote_branch(remotes: Sequence['Remote'], branch_name: str) -> 'RemoteReference':
"""Find the remote branch matching the name of the given branch or raise InvalidGitRepositoryError"""
for remote in remotes:
try:
return remote.refs[branch_name]
except IndexError:
continue
# END exception handling
# END for remote
raise InvalidGitRepositoryError("Didn't find remote branch '%r' in any of the given remotes" % branch_name)
#} END utilities
#{ Classes
class SubmoduleConfigParser(GitConfigParser):
"""
Catches calls to _write, and updates the .gitmodules blob in the index
with the new data, if we have written into a stream. Otherwise it will
add the local file to the index to make it correspond with the working tree.
Additionally, the cache must be cleared
Please note that no mutating method will work in bare mode
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
self._smref: Union['ReferenceType[Submodule]', None] = None
self._index = None
self._auto_write = True
super(SubmoduleConfigParser, self).__init__(*args, **kwargs)
#{ Interface
def set_submodule(self, submodule: 'Submodule') -> None:
"""Set this instance's submodule. It must be called before
the first write operation begins"""
self._smref = weakref.ref(submodule)
def flush_to_index(self) -> None:
"""Flush changes in our configuration file to the index"""
assert self._smref is not None
# should always have a file here
assert not isinstance(self._file_or_files, BytesIO)
sm = self._smref()
if sm is not None:
index = self._index
if index is None:
index = sm.repo.index
# END handle index
index.add([sm.k_modules_file], write=self._auto_write)
sm._clear_cache()
# END handle weakref
#} END interface
#{ Overridden Methods
def write(self) -> None: # type: ignore[override]
rval: None = super(SubmoduleConfigParser, self).write()
self.flush_to_index()
return rval
# END overridden methods
#} END classes
| gitpython-developers/GitPython | git/objects/submodule/util.py | Python | bsd-3-clause | 3,358 |
from sympy.core import pi, oo, symbols, Function, Rational, Integer, GoldenRatio, EulerGamma, Catalan, Lambda, Dummy, Eq
from sympy.functions import Piecewise, sin, cos, Abs, exp, ceiling, sqrt, gamma
from sympy.utilities.pytest import raises
from sympy.printing.ccode import CCodePrinter
from sympy.utilities.lambdify import implemented_function
from sympy.tensor import IndexedBase, Idx
# import test
from sympy import ccode
x, y, z = symbols('x,y,z')
g = Function('g')
def test_printmethod():
class fabs(Abs):
def _ccode(self, printer):
return "fabs(%s)" % printer._print(self.args[0])
assert ccode(fabs(x)) == "fabs(x)"
def test_ccode_sqrt():
assert ccode(sqrt(x)) == "sqrt(x)"
assert ccode(x**0.5) == "sqrt(x)"
assert ccode(sqrt(x)) == "sqrt(x)"
def test_ccode_Pow():
assert ccode(x**3) == "pow(x, 3)"
assert ccode(x**(y**3)) == "pow(x, pow(y, 3))"
assert ccode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"pow(3.5*g(x), -x + pow(y, x))/(pow(x, 2) + y)"
assert ccode(x**-1.0) == '1.0/x'
assert ccode(x**Rational(2, 3)) == 'pow(x, 2.0L/3.0L)'
_cond_cfunc = [(lambda base, exp: exp.is_integer, "dpowi"),
(lambda base, exp: not exp.is_integer, "pow")]
assert ccode(x**3, user_functions={'Pow': _cond_cfunc}) == 'dpowi(x, 3)'
assert ccode(x**3.2, user_functions={'Pow': _cond_cfunc}) == 'pow(x, 3.2)'
def test_ccode_constants_mathh():
assert ccode(exp(1)) == "M_E"
assert ccode(pi) == "M_PI"
assert ccode(oo) == "HUGE_VAL"
assert ccode(-oo) == "-HUGE_VAL"
def test_ccode_constants_other():
assert ccode(2*GoldenRatio) == "double const GoldenRatio = 1.61803398874989;\n2*GoldenRatio"
assert ccode(
2*Catalan) == "double const Catalan = 0.915965594177219;\n2*Catalan"
assert ccode(2*EulerGamma) == "double const EulerGamma = 0.577215664901533;\n2*EulerGamma"
def test_ccode_Rational():
assert ccode(Rational(3, 7)) == "3.0L/7.0L"
assert ccode(Rational(18, 9)) == "2"
assert ccode(Rational(3, -7)) == "-3.0L/7.0L"
assert ccode(Rational(-3, -7)) == "3.0L/7.0L"
assert ccode(x + Rational(3, 7)) == "x + 3.0L/7.0L"
assert ccode(Rational(3, 7)*x) == "(3.0L/7.0L)*x"
def test_ccode_Integer():
assert ccode(Integer(67)) == "67"
assert ccode(Integer(-1)) == "-1"
def test_ccode_functions():
assert ccode(sin(x) ** cos(x)) == "pow(sin(x), cos(x))"
def test_ccode_inline_function():
x = symbols('x')
g = implemented_function('g', Lambda(x, 2*x))
assert ccode(g(x)) == "2*x"
g = implemented_function('g', Lambda(x, 2*x/Catalan))
assert ccode(
g(x)) == "double const Catalan = %s;\n2*x/Catalan" % Catalan.n()
A = IndexedBase('A')
i = Idx('i', symbols('n', integer=True))
g = implemented_function('g', Lambda(x, x*(1 + x)*(2 + x)))
assert ccode(g(A[i]), assign_to=A[i]) == (
"for (int i=0; i<n; i++){\n"
" A[i] = (A[i] + 1)*(A[i] + 2)*A[i];\n"
"}"
)
def test_ccode_exceptions():
assert ccode(ceiling(x)) == "ceil(x)"
assert ccode(Abs(x)) == "fabs(x)"
assert ccode(gamma(x)) == "tgamma(x)"
def test_ccode_user_functions():
x = symbols('x', integer=False)
n = symbols('n', integer=True)
custom_functions = {
"ceiling": "ceil",
"Abs": [(lambda x: not x.is_integer, "fabs"), (lambda x: x.is_integer, "abs")],
}
assert ccode(ceiling(x), user_functions=custom_functions) == "ceil(x)"
assert ccode(Abs(x), user_functions=custom_functions) == "fabs(x)"
assert ccode(Abs(n), user_functions=custom_functions) == "abs(n)"
def test_ccode_boolean():
assert ccode(x & y) == "x && y"
assert ccode(x | y) == "x || y"
assert ccode(~x) == "!x"
assert ccode(x & y & z) == "x && y && z"
assert ccode(x | y | z) == "x || y || z"
assert ccode((x & y) | z) == "z || x && y"
assert ccode((x | y) & z) == "z && (x || y)"
def test_ccode_Piecewise():
p = ccode(Piecewise((x, x < 1), (x**2, True)))
s = \
"""\
if (x < 1) {
x
}
else {
pow(x, 2)
}\
"""
assert p == s
def test_ccode_Piecewise_deep():
p = ccode(2*Piecewise((x, x < 1), (x**2, True)))
s = \
"""\
2*((x < 1) ? (
x
)
: (
pow(x, 2)
) )\
"""
assert p == s
def test_ccode_settings():
raises(TypeError, lambda: ccode(sin(x), method="garbage"))
def test_ccode_Indexed():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o = symbols('n m o', integer=True)
i, j, k = Idx('i', n), Idx('j', m), Idx('k', o)
p = CCodePrinter()
p._not_c = set()
x = IndexedBase('x')[j]
assert p._print_Indexed(x) == 'x[j]'
A = IndexedBase('A')[i, j]
assert p._print_Indexed(A) == 'A[%s]' % (m*i+j)
B = IndexedBase('B')[i, j, k]
assert p._print_Indexed(B) == 'B[%s]' % (i*o*m+j*o+k)
assert p._not_c == set()
def test_ccode_Indexed_without_looking_for_contraction():
len_y = 5
y = IndexedBase('y', shape=(len_y,))
x = IndexedBase('x', shape=(len_y,))
Dy = IndexedBase('Dy', shape=(len_y-1,))
i = Idx('i', len_y-1)
e=Eq(Dy[i], (y[i+1]-y[i])/(x[i+1]-x[i]))
code0 = ccode(e.rhs, assign_to=e.lhs, contract=False)
assert code0 == 'Dy[i] = (y[%s] - y[i])/(x[%s] - x[i]);' % (i + 1, i + 1)
def test_ccode_loops_matrix_vector():
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = x[j]*A[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}'
)
c = ccode(A[i, j]*x[j], assign_to=y[i])
assert c == s
def test_dummy_loops():
# the following line could also be
# [Dummy(s, integer=True) for s in 'im']
# or [Dummy(integer=True) for s in 'im']
i, m = symbols('i m', integer=True, cls=Dummy)
x = IndexedBase('x')
y = IndexedBase('y')
i = Idx(i, m)
expected = (
'for (int i_%(icount)i=0; i_%(icount)i<m_%(mcount)i; i_%(icount)i++){\n'
' y[i_%(icount)i] = x[i_%(icount)i];\n'
'}'
) % {'icount': i.label.dummy_index, 'mcount': m.dummy_index}
code = ccode(x[i], assign_to=y[i])
assert code == expected
def test_ccode_loops_add():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m = symbols('n m', integer=True)
A = IndexedBase('A')
x = IndexedBase('x')
y = IndexedBase('y')
z = IndexedBase('z')
i = Idx('i', m)
j = Idx('j', n)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = x[i] + z[i];\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = x[j]*A[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}'
)
c = ccode(A[i, j]*x[j] + x[i] + z[i], assign_to=y[i])
assert c == s
def test_ccode_loops_multiple_contractions():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' for (int k=0; k<o; k++){\n'
' for (int l=0; l<p; l++){\n'
' y[i] = y[i] + b[%s]*a[%s];\n' % (j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = ccode(b[j, k, l]*a[i, j, k, l], assign_to=y[i])
assert c == s
def test_ccode_loops_addfactor():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
l = Idx('l', p)
s = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' for (int k=0; k<o; k++){\n'
' for (int l=0; l<p; l++){\n'
' y[i] = (a[%s] + b[%s])*c[%s] + y[i];\n' % (i*n*o*p + j*o*p + k*p + l, i*n*o*p + j*o*p + k*p + l, j*o*p + k*p + l) +\
' }\n'
' }\n'
' }\n'
'}'
)
c = ccode((a[i, j, k, l] + b[i, j, k, l])*c[j, k, l], assign_to=y[i])
assert c == s
def test_ccode_loops_multiple_terms():
from sympy.tensor import IndexedBase, Idx
from sympy import symbols
n, m, o, p = symbols('n m o p', integer=True)
a = IndexedBase('a')
b = IndexedBase('b')
c = IndexedBase('c')
y = IndexedBase('y')
i = Idx('i', m)
j = Idx('j', n)
k = Idx('k', o)
s0 = (
'for (int i=0; i<m; i++){\n'
' y[i] = 0;\n'
'}\n'
)
s1 = (
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' for (int k=0; k<o; k++){\n'
' y[i] = b[j]*b[k]*c[%s] + y[i];\n' % (i*n*o + j*o + k) +\
' }\n'
' }\n'
'}\n'
)
s2 = (
'for (int i=0; i<m; i++){\n'
' for (int k=0; k<o; k++){\n'
' y[i] = b[k]*a[%s] + y[i];\n' % (i*o + k) +\
' }\n'
'}\n'
)
s3 = (
'for (int i=0; i<m; i++){\n'
' for (int j=0; j<n; j++){\n'
' y[i] = b[j]*a[%s] + y[i];\n' % (i*n + j) +\
' }\n'
'}\n'
)
c = ccode(
b[j]*a[i, j] + b[k]*a[i, k] + b[j]*b[k]*c[i, j, k], assign_to=y[i])
assert (c == s0 + s1 + s2 + s3[:-1] or
c == s0 + s1 + s3 + s2[:-1] or
c == s0 + s2 + s1 + s3[:-1] or
c == s0 + s2 + s3 + s1[:-1] or
c == s0 + s3 + s1 + s2[:-1] or
c == s0 + s3 + s2 + s1[:-1])
| wdv4758h/ZipPy | edu.uci.python.benchmark/src/benchmarks/sympy/sympy/printing/tests/test_ccode.py | Python | bsd-3-clause | 10,134 |
from django.contrib import admin
from django.utils.translation import ugettext, ugettext_lazy as _
from ella.positions.models import Position
from ella.utils import timezone
class PositionOptions(admin.ModelAdmin):
def show_title(self, obj):
if not obj.target:
return '-- %s --' % ugettext('empty position')
else:
return u'%s [%s]' % (obj.target.title, ugettext(obj.target_ct.name),)
show_title.short_description = _('Title')
def is_filled(self, obj):
if obj.target:
return True
else:
return False
is_filled.short_description = _('Filled')
is_filled.boolean = True
def is_active(self, obj):
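        # Active when not disabled and the current time falls within the
        # active_from/active_till window (missing bounds are treated as open).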
if obj.disabled:
return False
now = timezone.now()
active_from = not obj.active_from or obj.active_from <= now
active_till = not obj.active_till or obj.active_till > now
return active_from and active_till
is_active.short_description = _('Active')
is_active.boolean = True
list_display = ('name', 'category', 'box_type', 'is_active', 'is_filled', 'show_title', 'disabled',)
list_filter = ('category', 'name', 'disabled', 'active_from', 'active_till',)
search_fields = ('box_type', 'text',)
# suggest_fields = {'category': ('tree_path', 'title', 'slug',),}
admin.site.register(Position, PositionOptions)
| WhiskeyMedia/ella | ella/positions/admin.py | Python | bsd-3-clause | 1,381 |
from django.contrib import admin
from panoptes.tracking.models import AccountFilter
class AccountFilterAdmin(admin.ModelAdmin):
list_display = ('location', 'include_users', 'exclude_users')
ordering = ('location',)
admin.site.register(AccountFilter, AccountFilterAdmin)
| cilcoberlin/panoptes | panoptes/tracking/admin.py | Python | bsd-3-clause | 277 |
'''
c++ finally
'''
def myfunc():
b = False
try:
print('trying something that will fail...')
print('some call that fails at runtime')
f = open('/tmp/nosuchfile')
except:
print('got exception')
finally:
print('finally cleanup')
b = True
TestError( b == True )
def main():
myfunc()
| kustomzone/Rusthon | regtests/c++/try_except_finally.py | Python | bsd-3-clause | 301 |
# -*- coding: utf-8 -*-
#
# SelfTest/Signature/test_pkcs1_pss.py: Self-test for PKCS#1 PSS signatures
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from __future__ import nested_scopes
__revision__ = "$Id$"
import unittest
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto.Hash import *
from Crypto.Signature import PKCS1_PSS as PKCS
from Crypto.Util.py3compat import *
def isStr(s):
t = ''
try:
t += s
except TypeError:
return 0
return 1
def rws(t):
"""Remove white spaces, tabs, and new lines from a string"""
for c in ['\t', '\n', ' ']:
t = t.replace(c,'')
return t
def t2b(t):
"""Convert a text string with bytes in hex form to a byte string"""
clean = b(rws(t))
if len(clean)%2 == 1:
raise ValueError("Even number of characters expected")
return a2b_hex(clean)
# Helper class to count how many bytes have been requested
# from the key's private RNG, w/o counting those used for blinding
class MyKey:
def __init__(self, key):
self._key = key
self.n = key.n
self.asked = 0
def _randfunc(self, N):
self.asked += N
return self._key._randfunc(N)
def sign(self, m):
return self._key.sign(m)
def has_private(self):
return self._key.has_private()
def decrypt(self, m):
return self._key.decrypt(m)
def verify(self, m, p):
return self._key.verify(m, p)
def encrypt(self, m, p):
return self._key.encrypt(m, p)
class PKCS1_PSS_Tests(unittest.TestCase):
# List of tuples with test data for PKCS#1 PSS
# Each tuple is made up by:
# Item #0: dictionary with RSA key component, or key to import
# Item #1: data to hash and sign
# Item #2: signature of the data #1, done with the key #0,
# and salt #3 after hashing it with #4
# Item #3: salt
# Item #4: hash object generator
_testData = (
#
# From in pss-vect.txt to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a2 ba 40 ee 07 e3 b2 bd 2f 02 ce 22 7f 36 a1 95
02 44 86 e4 9c 19 cb 41 bb bd fb ba 98 b2 2b 0e
57 7c 2e ea ff a2 0d 88 3a 76 e6 5e 39 4c 69 d4
b3 c0 5a 1e 8f ad da 27 ed b2 a4 2b c0 00 fe 88
8b 9b 32 c2 2d 15 ad d0 cd 76 b3 e7 93 6e 19 95
5b 22 0d d1 7d 4e a9 04 b1 ec 10 2b 2e 4d e7 75
12 22 aa 99 15 10 24 c7 cb 41 cc 5e a2 1d 00 ee
b4 1f 7c 80 08 34 d2 c6 e0 6b ce 3b ce 7e a9 a5''',
'e':'''01 00 01''',
# In the test vector, only p and q were given...
# d is computed offline as e^{-1} mod (p-1)(q-1)
'd':'''50e2c3e38d886110288dfc68a9533e7e12e27d2aa56
d2cdb3fb6efa990bcff29e1d2987fb711962860e7391b1ce01
ebadb9e812d2fbdfaf25df4ae26110a6d7a26f0b810f54875e
17dd5c9fb6d641761245b81e79f8c88f0e55a6dcd5f133abd3
5f8f4ec80adf1bf86277a582894cb6ebcd2162f1c7534f1f49
47b129151b71'''
},
# Data to sign
'''85 9e ef 2f d7 8a ca 00 30 8b dc 47 11 93 bf 55
bf 9d 78 db 8f 8a 67 2b 48 46 34 f3 c9 c2 6e 64
78 ae 10 26 0f e0 dd 8c 08 2e 53 a5 29 3a f2 17
3c d5 0c 6d 5d 35 4f eb f7 8b 26 02 1c 25 c0 27
12 e7 8c d4 69 4c 9f 46 97 77 e4 51 e7 f8 e9 e0
4c d3 73 9c 6b bf ed ae 48 7f b5 56 44 e9 ca 74
ff 77 a5 3c b7 29 80 2f 6e d4 a5 ff a8 ba 15 98
90 fc''',
# Signature
'''8d aa 62 7d 3d e7 59 5d 63 05 6c 7e c6 59 e5 44
06 f1 06 10 12 8b aa e8 21 c8 b2 a0 f3 93 6d 54
dc 3b dc e4 66 89 f6 b7 95 1b b1 8e 84 05 42 76
97 18 d5 71 5d 21 0d 85 ef bb 59 61 92 03 2c 42
be 4c 29 97 2c 85 62 75 eb 6d 5a 45 f0 5f 51 87
6f c6 74 3d ed dd 28 ca ec 9b b3 0e a9 9e 02 c3
48 82 69 60 4f e4 97 f7 4c cd 7c 7f ca 16 71 89
71 23 cb d3 0d ef 5d 54 a2 b5 53 6a d9 0a 74 7e''',
# Salt
'''e3 b5 d5 d0 02 c1 bc e5 0c 2b 65 ef 88 a1 88 d8
3b ce 7e 61''',
# Hash algorithm
SHA
),
#
# Example 1.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
'e':'''01 00 01''',
'd':'''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
},
# Message
'''cd c8 7d a2 23 d7 86 df 3b 45 e0 bb bc 72 13 26
d1 ee 2a f8 06 cc 31 54 75 cc 6f 0d 9c 66 e1 b6
23 71 d4 5c e2 39 2e 1a c9 28 44 c3 10 10 2f 15
6a 0d 8d 52 c1 f4 c4 0b a3 aa 65 09 57 86 cb 76
97 57 a6 56 3b a9 58 fe d0 bc c9 84 e8 b5 17 a3
d5 f5 15 b2 3b 8a 41 e7 4a a8 67 69 3f 90 df b0
61 a6 e8 6d fa ae e6 44 72 c0 0e 5f 20 94 57 29
cb eb e7 7f 06 ce 78 e0 8f 40 98 fb a4 1f 9d 61
93 c0 31 7e 8b 60 d4 b6 08 4a cb 42 d2 9e 38 08
a3 bc 37 2d 85 e3 31 17 0f cb f7 cc 72 d0 b7 1c
29 66 48 b3 a4 d1 0f 41 62 95 d0 80 7a a6 25 ca
b2 74 4f d9 ea 8f d2 23 c4 25 37 02 98 28 bd 16
be 02 54 6f 13 0f d2 e3 3b 93 6d 26 76 e0 8a ed
1b 73 31 8b 75 0a 01 67 d0''',
# Signature
'''90 74 30 8f b5 98 e9 70 1b 22 94 38 8e 52 f9 71
fa ac 2b 60 a5 14 5a f1 85 df 52 87 b5 ed 28 87
e5 7c e7 fd 44 dc 86 34 e4 07 c8 e0 e4 36 0b c2
26 f3 ec 22 7f 9d 9e 54 63 8e 8d 31 f5 05 12 15
df 6e bb 9c 2f 95 79 aa 77 59 8a 38 f9 14 b5 b9
c1 bd 83 c4 e2 f9 f3 82 a0 d0 aa 35 42 ff ee 65
98 4a 60 1b c6 9e b2 8d eb 27 dc a1 2c 82 c2 d4
c3 f6 6c d5 00 f1 ff 2b 99 4d 8a 4e 30 cb b3 3c''',
# Salt
'''de e9 59 c7 e0 64 11 36 14 20 ff 80 18 5e d5 7f
3e 67 76 af''',
# Hash
SHA
),
#
# Example 1.2 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
'e':'''01 00 01''',
'd':'''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
},
# Message
'''85 13 84 cd fe 81 9c 22 ed 6c 4c cb 30 da eb 5c
f0 59 bc 8e 11 66 b7 e3 53 0c 4c 23 3e 2b 5f 8f
71 a1 cc a5 82 d4 3e cc 72 b1 bc a1 6d fc 70 13
22 6b 9e''',
# Signature
'''3e f7 f4 6e 83 1b f9 2b 32 27 41 42 a5 85 ff ce
fb dc a7 b3 2a e9 0d 10 fb 0f 0c 72 99 84 f0 4e
f2 9a 9d f0 78 07 75 ce 43 73 9b 97 83 83 90 db
0a 55 05 e6 3d e9 27 02 8d 9d 29 b2 19 ca 2c 45
17 83 25 58 a5 5d 69 4a 6d 25 b9 da b6 60 03 c4
cc cd 90 78 02 19 3b e5 17 0d 26 14 7d 37 b9 35
90 24 1b e5 1c 25 05 5f 47 ef 62 75 2c fb e2 14
18 fa fe 98 c2 2c 4d 4d 47 72 4f db 56 69 e8 43''',
# Salt
'''ef 28 69 fa 40 c3 46 cb 18 3d ab 3d 7b ff c9 8f
d5 6d f4 2d''',
# Hash
SHA
),
#
# Example 2.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''01 d4 0c 1b cf 97 a6 8a e7 cd bd 8a 7b f3 e3 4f
a1 9d cc a4 ef 75 a4 74 54 37 5f 94 51 4d 88 fe
d0 06 fb 82 9f 84 19 ff 87 d6 31 5d a6 8a 1f f3
a0 93 8e 9a bb 34 64 01 1c 30 3a d9 91 99 cf 0c
7c 7a 8b 47 7d ce 82 9e 88 44 f6 25 b1 15 e5 e9
c4 a5 9c f8 f8 11 3b 68 34 33 6a 2f d2 68 9b 47
2c bb 5e 5c ab e6 74 35 0c 59 b6 c1 7e 17 68 74
fb 42 f8 fc 3d 17 6a 01 7e dc 61 fd 32 6c 4b 33
c9''',
'e':'''01 00 01''',
'd':'''02 7d 14 7e 46 73 05 73 77 fd 1e a2 01 56 57 72
17 6a 7d c3 83 58 d3 76 04 56 85 a2 e7 87 c2 3c
15 57 6b c1 6b 9f 44 44 02 d6 bf c5 d9 8a 3e 88
ea 13 ef 67 c3 53 ec a0 c0 dd ba 92 55 bd 7b 8b
b5 0a 64 4a fd fd 1d d5 16 95 b2 52 d2 2e 73 18
d1 b6 68 7a 1c 10 ff 75 54 5f 3d b0 fe 60 2d 5f
2b 7f 29 4e 36 01 ea b7 b9 d1 ce cd 76 7f 64 69
2e 3e 53 6c a2 84 6c b0 c2 dd 48 6a 39 fa 75 b1'''
},
# Message
'''da ba 03 20 66 26 3f ae db 65 98 48 11 52 78 a5
2c 44 fa a3 a7 6f 37 51 5e d3 36 32 10 72 c4 0a
9d 9b 53 bc 05 01 40 78 ad f5 20 87 51 46 aa e7
0f f0 60 22 6d cb 7b 1f 1f c2 7e 93 60''',
# Signature
'''01 4c 5b a5 33 83 28 cc c6 e7 a9 0b f1 c0 ab 3f
d6 06 ff 47 96 d3 c1 2e 4b 63 9e d9 13 6a 5f ec
6c 16 d8 88 4b dd 99 cf dc 52 14 56 b0 74 2b 73
68 68 cf 90 de 09 9a db 8d 5f fd 1d ef f3 9b a4
00 7a b7 46 ce fd b2 2d 7d f0 e2 25 f5 46 27 dc
65 46 61 31 72 1b 90 af 44 53 63 a8 35 8b 9f 60
76 42 f7 8f ab 0a b0 f4 3b 71 68 d6 4b ae 70 d8
82 78 48 d8 ef 1e 42 1c 57 54 dd f4 2c 25 89 b5
b3''',
# Salt
'''57 bf 16 0b cb 02 bb 1d c7 28 0c f0 45 85 30 b7
d2 83 2f f7''',
SHA
),
#
# Example 8.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''49 53 70 a1 fb 18 54 3c 16 d3 63 1e 31 63 25 5d
f6 2b e6 ee e8 90 d5 f2 55 09 e4 f7 78 a8 ea 6f
bb bc df 85 df f6 4e 0d 97 20 03 ab 36 81 fb ba
6d d4 1f d5 41 82 9b 2e 58 2d e9 f2 a4 a4 e0 a2
d0 90 0b ef 47 53 db 3c ee 0e e0 6c 7d fa e8 b1
d5 3b 59 53 21 8f 9c ce ea 69 5b 08 66 8e de aa
dc ed 94 63 b1 d7 90 d5 eb f2 7e 91 15 b4 6c ad
4d 9a 2b 8e fa b0 56 1b 08 10 34 47 39 ad a0 73
3f''',
'e':'''01 00 01''',
'd':'''6c 66 ff e9 89 80 c3 8f cd ea b5 15 98 98 83 61
65 f4 b4 b8 17 c4 f6 a8 d4 86 ee 4e a9 13 0f e9
b9 09 2b d1 36 d1 84 f9 5f 50 4a 60 7e ac 56 58
46 d2 fd d6 59 7a 89 67 c7 39 6e f9 5a 6e ee bb
45 78 a6 43 96 6d ca 4d 8e e3 de 84 2d e6 32 79
c6 18 15 9c 1a b5 4a 89 43 7b 6a 61 20 e4 93 0a
fb 52 a4 ba 6c ed 8a 49 47 ac 64 b3 0a 34 97 cb
e7 01 c2 d6 26 6d 51 72 19 ad 0e c6 d3 47 db e9'''
},
# Message
'''81 33 2f 4b e6 29 48 41 5e a1 d8 99 79 2e ea cf
6c 6e 1d b1 da 8b e1 3b 5c ea 41 db 2f ed 46 70
92 e1 ff 39 89 14 c7 14 25 97 75 f5 95 f8 54 7f
73 56 92 a5 75 e6 92 3a f7 8f 22 c6 99 7d db 90
fb 6f 72 d7 bb 0d d5 74 4a 31 de cd 3d c3 68 58
49 83 6e d3 4a ec 59 63 04 ad 11 84 3c 4f 88 48
9f 20 97 35 f5 fb 7f da f7 ce c8 ad dc 58 18 16
8f 88 0a cb f4 90 d5 10 05 b7 a8 e8 4e 43 e5 42
87 97 75 71 dd 99 ee a4 b1 61 eb 2d f1 f5 10 8f
12 a4 14 2a 83 32 2e db 05 a7 54 87 a3 43 5c 9a
78 ce 53 ed 93 bc 55 08 57 d7 a9 fb''',
# Signature
'''02 62 ac 25 4b fa 77 f3 c1 ac a2 2c 51 79 f8 f0
40 42 2b 3c 5b af d4 0a 8f 21 cf 0f a5 a6 67 cc
d5 99 3d 42 db af b4 09 c5 20 e2 5f ce 2b 1e e1
e7 16 57 7f 1e fa 17 f3 da 28 05 2f 40 f0 41 9b
23 10 6d 78 45 aa f0 11 25 b6 98 e7 a4 df e9 2d
39 67 bb 00 c4 d0 d3 5b a3 55 2a b9 a8 b3 ee f0
7c 7f ec db c5 42 4a c4 db 1e 20 cb 37 d0 b2 74
47 69 94 0e a9 07 e1 7f bb ca 67 3b 20 52 23 80
c5''',
# Salt
'''1d 65 49 1d 79 c8 64 b3 73 00 9b e6 f6 f2 46 7b
ac 4c 78 fa''',
SHA
)
)
def testSign1(self):
for i in range(len(self._testData)):
# Build the key
comps = [ long(rws(self._testData[i][0][x]),16) for x in ('n','e','d') ]
key = MyKey(RSA.construct(comps))
# Hash function
h = self._testData[i][4].new()
# Data to sign
h.update(t2b(self._testData[i][1]))
# Salt
test_salt = t2b(self._testData[i][3])
key._randfunc = lambda N: test_salt
# The real test
signer = PKCS.new(key)
self.assertTrue(signer.can_sign())
s = signer.sign(h)
self.assertEqual(s, t2b(self._testData[i][2]))
def testVerify1(self):
for i in range(len(self._testData)):
# Build the key
comps = [ long(rws(self._testData[i][0][x]),16) for x in ('n','e') ]
key = MyKey(RSA.construct(comps))
# Hash function
h = self._testData[i][4].new()
# Data to sign
h.update(t2b(self._testData[i][1]))
# Salt
test_salt = t2b(self._testData[i][3])
# The real test
key._randfunc = lambda N: test_salt
verifier = PKCS.new(key)
self.assertFalse(verifier.can_sign())
result = verifier.verify(h, t2b(self._testData[i][2]))
self.assertTrue(result)
def testSignVerify(self):
h = SHA.new()
h.update(b('blah blah blah'))
rng = Random.new().read
key = MyKey(RSA.generate(1024,rng))
# Helper function to monitor what's request from MGF
global mgfcalls
def newMGF(seed,maskLen):
global mgfcalls
mgfcalls += 1
return bchr(0x00)*maskLen
        # Verify that PSS works with all supported hash algorithms
for hashmod in (MD2,MD5,SHA,SHA224,SHA256,SHA384,RIPEMD):
h = hashmod.new()
h.update(b('blah blah blah'))
# Verify that sign() asks for as many random bytes
# as the hash output size
key.asked = 0
signer = PKCS.new(key)
s = signer.sign(h)
self.assertTrue(signer.verify(h, s))
self.assertEqual(key.asked, h.digest_size)
h = SHA.new()
h.update(b('blah blah blah'))
# Verify that sign() uses a different salt length
for sLen in (0,3,21):
key.asked = 0
signer = PKCS.new(key, saltLen=sLen)
s = signer.sign(h)
self.assertEqual(key.asked, sLen)
self.assertTrue(signer.verify(h, s))
# Verify that sign() uses the custom MGF
mgfcalls = 0
signer = PKCS.new(key, newMGF)
s = signer.sign(h)
self.assertEqual(mgfcalls, 1)
self.assertTrue(signer.verify(h, s))
# Verify that sign() does not call the RNG
# when salt length is 0, even when a new MGF is provided
key.asked = 0
mgfcalls = 0
signer = PKCS.new(key, newMGF, 0)
s = signer.sign(h)
self.assertEqual(key.asked,0)
self.assertEqual(mgfcalls, 1)
self.assertTrue(signer.verify(h, s))
def get_tests(config={}):
tests = []
tests += list_test_cases(PKCS1_PSS_Tests)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4
| yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/Crypto/SelfTest/Signature/test_pkcs1_pss.py | Python | mit | 20,603 |
#! /usr/bin/env python
"""
This is an example that demonstrates how to use an
RGB led with BreakfastSerial. It assumes you have an
RGB led wired up with red on pin 10, green on pin 9,
and blue on pin 8.
"""
from BreakfastSerial import RGBLed, Arduino
from time import sleep
board = Arduino()
led = RGBLed(board, { "red": 10, "green": 9, "blue": 8 })
# Red (R: on, G: off, B: off)
led.red()
sleep(1)
# Green (R: off, G: on, B: off)
led.green()
sleep(1)
# Blue (R: off, G: off, B: on)
led.blue()
sleep(1)
# Yellow (R: on, G: on, B: off)
led.yellow()
sleep(1)
# Cyan (R: off, G: on, B: on)
led.cyan()
sleep(1)
# Purple (R: on, G: off, B: on)
led.purple()
sleep(1)
# White (R: on, G: on, B: on)
led.white()
sleep(1)
# Off (R: off, G: off, B: off)
led.off()
# Run an interactive shell so you can play (not required)
import code
code.InteractiveConsole(locals=globals()).interact()
| andyclymer/ControlBoard | lib/modules/BreakfastSerial/examples/rgb_led.py | Python | mit | 887 |
import os
import inspect
from lib import BaseTest
def changesRemove(_, s):
return s.replace(os.path.join(os.path.dirname(inspect.getsourcefile(BaseTest)), "changes"), "")
class EditRepo1Test(BaseTest):
"""
edit repo: change comment
"""
fixtureCmds = [
"aptly repo create repo1",
]
runCmd = "aptly repo edit -comment=Lala repo1"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo1", "repo-show")
class EditRepo2Test(BaseTest):
"""
edit repo: change distribution & component
"""
fixtureCmds = [
"aptly repo create -comment=Lala -component=non-free repo2",
]
runCmd = "aptly repo edit -distribution=wheezy -component=contrib repo2"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo2", "repo-show")
class EditRepo3Test(BaseTest):
"""
edit repo: no such repo
"""
runCmd = "aptly repo edit repo3"
expectedCode = 1
class EditRepo4Test(BaseTest):
"""
edit repo: add uploaders.json
"""
fixtureCmds = [
"aptly repo create repo4",
]
runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders2.json repo4"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo4", "repo_show")
class EditRepo5Test(BaseTest):
"""
edit repo: with broken uploaders.json
"""
fixtureCmds = [
"aptly repo create repo5",
]
runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders3.json repo5"
expectedCode = 1
class EditRepo6Test(BaseTest):
"""
edit local repo: with missing uploaders.json
"""
fixtureCmds = [
"aptly repo create repo6",
]
runCmd = "aptly repo edit -uploaders-file=${changes}/uploaders-not-found.json repo6"
expectedCode = 1
outputMatchPrepare = changesRemove
class EditRepo7Test(BaseTest):
"""
edit local repo: remove uploaders.json
"""
fixtureCmds = [
"aptly repo create -uploaders-file=${changes}/uploaders2.json repo7",
]
runCmd = "aptly repo edit -uploaders-file= repo7"
def check(self):
self.check_output()
self.check_cmd_output("aptly repo show repo7", "repo_show")
| neolynx/aptly | system/t09_repo/edit.py | Python | mit | 2,278 |
import re
import datetime
import dateutil.parser
from django.conf import settings
from django.utils import feedgenerator
from django.utils.html import linebreaks
from apps.social.models import MSocialServices
from apps.reader.models import UserSubscription
from utils import log as logging
from vendor.facebook import GraphAPIError
class FacebookFetcher:
def __init__(self, feed, options=None):
self.feed = feed
self.options = options or {}
def fetch(self):
page_name = self.extract_page_name()
if not page_name:
return
facebook_user = self.facebook_user()
if not facebook_user:
return
# If 'video', use video API to get embed:
# f.get_object('tastyvegetarian', fields='posts')
# f.get_object('1992797300790726', fields='embed_html')
feed = self.fetch_page_feed(facebook_user, page_name, 'name,about,posts,videos,photos')
data = {}
data['title'] = feed.get('name', "%s on Facebook" % page_name)
data['link'] = feed.get('link', "https://facebook.com/%s" % page_name)
data['description'] = feed.get('about', "%s on Facebook" % page_name)
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'NewsBlur Facebook API Decrapifier - %s' % settings.NEWSBLUR_URL
data['docs'] = None
data['feed_url'] = self.feed.feed_address
rss = feedgenerator.Atom1Feed(**data)
merged_data = []
posts = feed.get('posts', {}).get('data', None)
if posts:
for post in posts:
story_data = self.page_posts_story(facebook_user, post)
if not story_data:
continue
merged_data.append(story_data)
videos = feed.get('videos', {}).get('data', None)
if videos:
for video in videos:
story_data = self.page_video_story(facebook_user, video)
if not story_data:
continue
for seen_data in merged_data:
if story_data['link'] == seen_data['link']:
# Video wins over posts (and attachments)
seen_data['description'] = story_data['description']
seen_data['title'] = story_data['title']
break
for story_data in merged_data:
rss.add_item(**story_data)
return rss.writeString('utf-8')
def extract_page_name(self):
page = None
try:
page_groups = re.search('facebook.com/(\w+)/?', self.feed.feed_address)
if not page_groups:
return
page = page_groups.group(1)
except IndexError:
return
return page
def facebook_user(self):
facebook_api = None
social_services = None
if self.options.get('requesting_user_id', None):
social_services = MSocialServices.get_user(self.options.get('requesting_user_id'))
facebook_api = social_services.facebook_api()
if not facebook_api:
logging.debug(u' ***> [%-30s] ~FRFacebook fetch failed: %s: No facebook API for %s' %
(self.feed.log_title[:30], self.feed.feed_address, self.options))
return
else:
usersubs = UserSubscription.objects.filter(feed=self.feed)
if not usersubs:
logging.debug(u' ***> [%-30s] ~FRFacebook fetch failed: %s: No subscriptions' %
(self.feed.log_title[:30], self.feed.feed_address))
return
for sub in usersubs:
social_services = MSocialServices.get_user(sub.user_id)
if not social_services.facebook_uid:
continue
facebook_api = social_services.facebook_api()
if not facebook_api:
continue
else:
break
if not facebook_api:
logging.debug(u' ***> [%-30s] ~FRFacebook fetch failed: %s: No facebook API for %s' %
(self.feed.log_title[:30], self.feed.feed_address, usersubs[0].user.username))
return
return facebook_api
def fetch_page_feed(self, facebook_user, page, fields):
try:
stories = facebook_user.get_object(page, fields=fields)
except GraphAPIError, e:
message = str(e).lower()
if 'session has expired' in message:
logging.debug(u' ***> [%-30s] ~FRFacebook page failed/expired, disconnecting facebook: %s: %s' %
(self.feed.log_title[:30], self.feed.feed_address, e))
self.feed.save_feed_history(560, "Facebook Error: Expired token")
return {}
if not stories:
return {}
return stories
def page_posts_story(self, facebook_user, page_story):
categories = set()
if 'message' not in page_story:
# Probably a story shared on the page's timeline, not a published story
return
message = linebreaks(page_story['message'])
created_date = page_story['created_time']
if isinstance(created_date, unicode):
created_date = dateutil.parser.parse(created_date)
fields = facebook_user.get_object(page_story['id'], fields='permalink_url,link,attachments')
permalink = fields.get('link', fields['permalink_url'])
attachments_html = ""
if fields.get('attachments', None) and fields['attachments']['data']:
for attachment in fields['attachments']['data']:
if 'media' in attachment:
attachments_html += "<img src=\"%s\" />" % attachment['media']['image']['src']
if attachment.get('subattachments', None):
for subattachment in attachment['subattachments']['data']:
attachments_html += "<img src=\"%s\" />" % subattachment['media']['image']['src']
content = """<div class="NB-facebook-rss">
<div class="NB-facebook-rss-message">%s</div>
<div class="NB-facebook-rss-picture">%s</div>
</div>""" % (
message,
attachments_html
)
story = {
'title': message,
'link': permalink,
'description': content,
'categories': list(categories),
'unique_id': "fb_post:%s" % page_story['id'],
'pubdate': created_date,
}
return story
def page_video_story(self, facebook_user, page_story):
categories = set()
if 'description' not in page_story:
return
message = linebreaks(page_story['description'])
created_date = page_story['updated_time']
if isinstance(created_date, unicode):
created_date = dateutil.parser.parse(created_date)
permalink = facebook_user.get_object(page_story['id'], fields='permalink_url')['permalink_url']
embed_html = facebook_user.get_object(page_story['id'], fields='embed_html')
if permalink.startswith('/'):
permalink = "https://www.facebook.com%s" % permalink
content = """<div class="NB-facebook-rss">
<div class="NB-facebook-rss-message">%s</div>
<div class="NB-facebook-rss-embed">%s</div>
</div>""" % (
message,
embed_html.get('embed_html', '')
)
story = {
'title': page_story.get('story', message),
'link': permalink,
'description': content,
'categories': list(categories),
'unique_id': "fb_post:%s" % page_story['id'],
'pubdate': created_date,
}
return story
def favicon_url(self):
page_name = self.extract_page_name()
facebook_user = self.facebook_user()
if not facebook_user:
logging.debug(u' ***> [%-30s] ~FRFacebook icon failed, disconnecting facebook: %s' %
(self.feed.log_title[:30], self.feed.feed_address))
return
try:
picture_data = facebook_user.get_object(page_name, fields='picture')
except GraphAPIError, e:
message = str(e).lower()
if 'session has expired' in message:
logging.debug(u' ***> [%-30s] ~FRFacebook icon failed/expired, disconnecting facebook: %s: %s' %
(self.feed.log_title[:30], self.feed.feed_address, e))
return
if 'picture' in picture_data:
return picture_data['picture']['data']['url']
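# Rough usage sketch (hypothetical names; assumes `feed` is a Feed model whose feed_address
# points at a Facebook page and that the requesting user has a linked Facebook account):
#
#   fetcher = FacebookFetcher(feed, options={'requesting_user_id': user.pk})
#   rss_xml = fetcher.fetch()         # Atom XML string, or None if the page/token can't be resolved
#   icon_url = fetcher.favicon_url()  # page picture URL, or None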
| AlphaCluster/NewsBlur | utils/facebook_fetcher.py | Python | mit | 9,072 |
import io
import sys
isPython3 = sys.version_info >= (3, 0)
class Scribe:
@staticmethod
def read(path):
with io.open(path, mode="rt", encoding="utf-8") as f:
s = f.read()
# go to beginning
f.seek(0)
return s
@staticmethod
def read_beginning(path, lines):
with io.open(path, mode="rt", encoding="utf-8") as f:
s = f.read(lines)
# go to beginning
f.seek(0)
return s
@staticmethod
def read_lines(path):
with io.open(path, mode="rt", encoding="utf-8") as f:
content = f.readlines()
return content
@staticmethod
def write(contents, path):
if isPython3:
with open(path, mode="wt", encoding="utf-8") as f:
# truncate previous contents
f.truncate()
f.write(contents)
else:
with io.open(path, mode="wt", encoding="utf-8") as f:
# truncate previous contents
f.truncate()
f.write(contents.decode("utf8"))
@staticmethod
def write_lines(lines, path):
if isPython3:
with open(path, mode="wt", encoding="utf-8") as f:
f.writelines([l + "\n" for l in lines])
else:
with io.open(path, mode="wt") as f:
for line in lines:
f.writelines(line.decode("utf8") + "\n")
@staticmethod
def add_content(contents, path):
if isPython3:
with open(path, mode="a", encoding="utf-8") as f:
f.writelines(contents)
else:
with io.open(path, mode="a") as f:
f.writelines(contents.decode("utf8"))
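# Minimal usage sketch (hypothetical file name; assumes a UTF-8 text file on disk):
#
#   Scribe.write(u"first line\n", "demo.txt")          # create/overwrite demo.txt
#   Scribe.add_content(u"second line\n", "demo.txt")   # append to it
#   print(Scribe.read_lines("demo.txt"))               # -> [u'first line\n', u'second line\n']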
| RobertoPrevato/Humbular | tools/knight/core/literature/scribe.py | Python | mit | 1,844 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script shows the categories on each page and lets you change them.
For each page in the target wiki:
* If the page contains no categories, you can specify a list of categories to
add to the page.
* If the page already contains one or more categories, you can specify a new
list of categories to replace the current list of categories of the page.
Usage:
python pwb.py catall [start]
If no starting name is provided, the bot starts at 'A'.
Options:
-onlynew : Only run on pages that do not yet have a category.
"""
#
# (C) Rob W.W. Hooft, Andre Engels, 2004
# (C) Pywikibot team, 2004-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import i18n, textlib
from pywikibot.bot import QuitKeyboardInterrupt
def choosecats(pagetext):
"""Coose categories."""
chosen = []
done = False
length = 1000
# TODO: → input_choice
pywikibot.output("""Give the new categories, one per line.
Empty line: if the first, don't change. Otherwise: Ready.
-: I made a mistake, let me start over.
?: Give the text of the page with GUI.
??: Give the text of the page in console.
xx: if the first, remove all categories and add no new.
q: quit.""")
while not done:
choice = pywikibot.input(u"?")
if choice == "":
done = True
elif choice == "-":
chosen = choosecats(pagetext)
done = True
elif choice == "?":
from pywikibot import editor as editarticle
editor = editarticle.TextEditor()
editor.edit(pagetext)
elif choice == "??":
pywikibot.output(pagetext[0:length])
length = length + 500
elif choice == "xx" and chosen == []:
chosen = None
done = True
elif choice == "q":
raise QuitKeyboardInterrupt
else:
chosen.append(choice)
return chosen
def make_categories(page, list, site=None):
"""Make categories."""
if site is None:
site = pywikibot.Site()
pllist = []
for p in list:
cattitle = "%s:%s" % (site.namespaces.CATEGORY, p)
pllist.append(pywikibot.Page(site, cattitle))
page.put_async(textlib.replaceCategoryLinks(page.get(), pllist,
site=page.site),
summary=i18n.twtranslate(site, 'catall-changing'))
def main(*args):
"""
Process command line arguments and perform task.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
docorrections = True
start = 'A'
local_args = pywikibot.handle_args(args)
for arg in local_args:
if arg == '-onlynew':
docorrections = False
else:
start = arg
mysite = pywikibot.Site()
for p in mysite.allpages(start=start):
try:
text = p.get()
cats = p.categories()
if not cats:
pywikibot.output(u"========== %s ==========" % p.title())
pywikibot.output('No categories')
pywikibot.output('-' * 40)
newcats = choosecats(text)
if newcats != [] and newcats is not None:
make_categories(p, newcats, mysite)
elif docorrections:
pywikibot.output(u"========== %s ==========" % p.title())
for c in cats:
pywikibot.output(c.title())
pywikibot.output('-' * 40)
newcats = choosecats(text)
if newcats is None:
make_categories(p, [], mysite)
elif newcats != []:
make_categories(p, newcats, mysite)
except pywikibot.IsRedirectPage:
pywikibot.output(u'%s is a redirect' % p.title())
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pywikibot.output('\nQuitting program...')
| h4ck3rm1k3/pywikibot-core | scripts/catall.py | Python | mit | 4,132 |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# NOTE: This class should not be instantiated and its
# ``traverse_and_document_shape`` method called directly. It should be
# inherited from a Documenter class with the appropriate methods
# and attributes.
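#
# As the traversal code below suggests, a concrete subclass supplies EVENT_NAME plus
# the document_recursive_shape(), document_shape_default() and per-type
# document_shape_type_<type_name>() hooks that traverse_and_document_shape() dispatches
# to (this is inferred from this module, not a complete specification).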
from botocore.utils import is_json_value_header
class ShapeDocumenter(object):
EVENT_NAME = ''
def __init__(self, service_name, operation_name, event_emitter,
context=None):
self._service_name = service_name
self._operation_name = operation_name
self._event_emitter = event_emitter
self._context = context
if context is None:
self._context = {
'special_shape_types': {}
}
def traverse_and_document_shape(self, section, shape, history,
include=None, exclude=None, name=None,
is_required=False):
"""Traverses and documents a shape
        Calls the appropriate methods on this class as the shape is
        traversed.
:param section: The section to document.
:param history: A list of the names of the shapes that have been
traversed.
:type include: Dictionary where keys are parameter names and
values are the shapes of the parameter names.
:param include: The parameter shapes to include in the documentation.
:type exclude: List of the names of the parameters to exclude.
:param exclude: The names of the parameters to exclude from
documentation.
:param name: The name of the shape.
:param is_required: If the shape is a required member.
"""
param_type = shape.type_name
if shape.name in history:
self.document_recursive_shape(section, shape, name=name)
else:
history.append(shape.name)
is_top_level_param = (len(history) == 2)
getattr(self, 'document_shape_type_%s' % param_type,
self.document_shape_default)(
section, shape, history=history, name=name,
include=include, exclude=exclude,
is_top_level_param=is_top_level_param,
is_required=is_required)
if is_top_level_param:
self._event_emitter.emit(
'docs.%s.%s.%s.%s' % (self.EVENT_NAME,
self._service_name,
self._operation_name,
name),
section=section)
at_overlying_method_section = (len(history) == 1)
if at_overlying_method_section:
self._event_emitter.emit(
'docs.%s.%s.%s.complete-section' % (self.EVENT_NAME,
self._service_name,
self._operation_name),
section=section)
history.pop()
def _get_special_py_default(self, shape):
special_defaults = {
'jsonvalue_header': '{...}|[...]|123|123.4|\'string\'|True|None',
'streaming_input_shape': 'b\'bytes\'|file',
'streaming_output_shape': 'StreamingBody()'
}
return self._get_value_for_special_type(shape, special_defaults)
def _get_special_py_type_name(self, shape):
special_type_names = {
'jsonvalue_header': 'JSON serializable',
'streaming_input_shape': 'bytes or seekable file-like object',
'streaming_output_shape': ':class:`.StreamingBody`'
}
return self._get_value_for_special_type(shape, special_type_names)
def _get_value_for_special_type(self, shape, special_type_map):
if is_json_value_header(shape):
return special_type_map['jsonvalue_header']
for special_type, marked_shape in self._context[
'special_shape_types'].items():
if special_type in special_type_map:
if shape == marked_shape:
return special_type_map[special_type]
return None
| achang97/YouTunes | lib/python2.7/site-packages/botocore/docs/shape.py | Python | mit | 4,763 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.7
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_allog', [dirname(__file__)])
except ImportError:
import _allog
return _allog
if fp is not None:
try:
_mod = imp.load_module('_allog', fp, pathname, description)
finally:
fp.close()
return _mod
_allog = swig_import_helper()
del swig_import_helper
else:
import _allog
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
def debug(*args):
return _allog.debug(*args)
debug = _allog.debug
def info(*args):
return _allog.info(*args)
info = _allog.info
def warning(*args):
return _allog.warning(*args)
warning = _allog.warning
def error(*args):
return _allog.error(*args)
error = _allog.error
def fatal(*args):
return _allog.fatal(*args)
fatal = _allog.fatal
# This file is compatible with both classic and new-style classes.
| mpatacchiola/naogui | zpgc_2016c/include/pynaoqi-python2.7-2.1.3.3-linux64/allog.py | Python | mit | 2,493 |
import logging
import codecs
from optparse import OptionParser
from pyjade.utils import process
import os
def convert_file():
support_compilers_list = ['django', 'jinja', 'underscore', 'mako', 'tornado']
available_compilers = {}
for i in support_compilers_list:
try:
compiler_class = __import__('pyjade.ext.%s' % i, fromlist=['pyjade']).Compiler
except ImportError, e:
logging.warning(e)
else:
available_compilers[i] = compiler_class
usage = "usage: %prog [options] file [output]"
parser = OptionParser(usage)
parser.add_option("-o", "--output", dest="output",
help="Write output to FILE", metavar="FILE")
parser.add_option("-c", "--compiler", dest="compiler",
choices=available_compilers.keys(),
default='django',
type="choice",
help="COMPILER must be one of %s, default is django" % ','.join(available_compilers.keys()))
parser.add_option("-e", "--ext", dest="extension",
help="Set import/extends default file extension", metavar="FILE")
options, args = parser.parse_args()
if len(args) < 1:
print "Specify the input file as the first argument."
exit()
file_output = options.output or (args[1] if len(args) > 1 else None)
compiler = options.compiler
if options.extension:
extension = '.%s'%options.extension
elif options.output:
extension = os.path.splitext(options.output)[1]
else:
extension = None
if compiler in available_compilers:
template = codecs.open(args[0], 'r', encoding='utf-8').read()
output = process(template, compiler=available_compilers[compiler], staticAttrs=True, extension=extension)
if file_output:
outfile = codecs.open(file_output, 'w', encoding='utf-8')
outfile.write(output)
else:
print output
else:
raise Exception('You must have %s installed!' % compiler)
if __name__ == '__main__':
convert_file()
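# Example invocations (hypothetical file names; invoking this module directly):
#
#   python convert.py page.jade                       # print Django-flavoured output to stdout
#   python convert.py page.jade page.html -c jinja    # write Jinja2 output to page.html
#   python convert.py page.jade -o page.html -e html  # choose the output file and default extension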
| glennyonemitsu/MarkupHiveServer | src/pyjade/convert.py | Python | mit | 2,157 |
import os
import unittest
from lxml import etree
from hovercraft.template import (Template, CSS_RESOURCE, JS_RESOURCE,
JS_POSITION_BODY, JS_POSITION_HEADER)
TEST_DATA = os.path.join(os.path.split(__file__)[0], 'test_data')
class TemplateInfoTests(unittest.TestCase):
"""Tests that template information is correctly parsed"""
def test_template_paths(self):
# You can specify a folder or a cfg file and that's the same thing.
template_info1 = Template(os.path.join(TEST_DATA, 'minimal'))
template_info2 = Template(os.path.join(TEST_DATA, 'minimal', 'template.cfg'))
self.assertEqual(etree.tostring(template_info1.xml_node()),
etree.tostring(template_info2.xml_node()))
def test_template_minimal(self):
template_info = Template(os.path.join(TEST_DATA, 'minimal'))
with open(os.path.join(TEST_DATA, 'minimal', 'template.xsl'), 'rb') as xslfile:
xsl = xslfile.read()
self.assertEqual(template_info.xsl, xsl)
template_files = [each.filepath for each in template_info.resources]
self.assertIn('js/impress.js', template_files)
self.assertIn('js/hovercraft-minimal.js', template_files)
css_files = list(each.filepath for each in template_info.resources if
each.resource_type == CSS_RESOURCE)
self.assertEqual(len(css_files), 0)
self.assertEqual(template_info.doctype, b'<!DOCTYPE html>')
def test_template_maximal(self):
template_info = Template(os.path.join(TEST_DATA, 'maximal'))
with open(os.path.join(TEST_DATA, 'maximal', 'template.xsl'), 'rb') as xslfile:
xsl = xslfile.read()
self.assertEqual(template_info.xsl, xsl)
template_files = [each.filepath for each in template_info.resources]
self.assertIn('images/python-logo-master-v3-TM.png', template_files)
self.assertIn('js/impress.js', template_files)
self.assertIn('js/impressConsole.js', template_files)
self.assertIn('js/hovercraft.js', template_files)
js_bodies = [each.filepath for each in template_info.resources if
each.resource_type == JS_RESOURCE and
each.extra_info == JS_POSITION_BODY]
self.assertIn('js/impress.js', js_bodies)
self.assertIn('js/impressConsole.js', js_bodies)
self.assertIn('js/hovercraft.js', js_bodies)
js_headers = [each.filepath for each in template_info.resources if
each.resource_type == JS_RESOURCE and
each.extra_info == JS_POSITION_HEADER]
self.assertIn('js/dummy.js', js_headers)
self.assertEqual(template_info.resources[0].filepath, 'css/style.css')
self.assertEqual(template_info.resources[0].extra_info, 'all')
self.assertEqual(template_info.resources[1].filepath, 'css/print.css')
self.assertEqual(template_info.resources[1].extra_info, 'print')
self.assertEqual(template_info.resources[2].filepath, 'css/impressConsole.css')
self.assertEqual(template_info.resources[2].extra_info, 'screen,projection')
self.assertEqual(template_info.doctype, b'<!DOCTYPE html SYSTEM "about:legacy-compat">')
class TemplateInfoNodeTests(unittest.TestCase):
"""Tests that template information is correctly made into an xml nodes"""
def test_minimal_template(self):
template_info = Template(os.path.join(TEST_DATA, 'minimal'))
node = template_info.xml_node()
self.assertEqual(etree.tostring(node), (
b'<templateinfo><header/><body>'
b'<js src="js/impress.js"/><js src="js/hovercraft-minimal.js"/>'
b'</body></templateinfo>'))
def test_maximal_template(self):
template_info = Template(os.path.join(TEST_DATA, 'maximal'))
node = template_info.xml_node()
self.assertEqual(etree.tostring(node), (
b'<templateinfo><header>'
b'<css href="css/style.css" media="all"/>'
b'<css href="css/print.css" media="print"/>'
b'<css href="css/impressConsole.css" media="screen,projection"/>'
b'<js src="js/dummy.js"/></header>'
b'<body><js src="js/impress.js"/><js src="js/impressConsole.js"/>'
b'<js src="js/hovercraft.js"/>'
b'</body></templateinfo>'))
if __name__ == '__main__':
unittest.main()
| alexAubin/hovercraft | hovercraft/tests/test_template.py | Python | mit | 4,447 |
import operator
import os
import abc
import functools
import pyparsing as pp
from mitmproxy.utils import strutils
from mitmproxy.utils import human
import typing # noqa
from . import generators
from . import exceptions
class Settings:
def __init__(
self,
is_client=False,
staticdir=None,
unconstrained_file_access=False,
request_host=None,
websocket_key=None,
protocol=None,
):
self.is_client = is_client
self.staticdir = staticdir
self.unconstrained_file_access = unconstrained_file_access
self.request_host = request_host
self.websocket_key = websocket_key # TODO: refactor this into the protocol
self.protocol = protocol
Sep = pp.Optional(pp.Literal(":")).suppress()
v_integer = pp.Word(pp.nums)\
.setName("integer")\
.setParseAction(lambda toks: int(toks[0]))
v_literal = pp.MatchFirst(
[
pp.QuotedString(
"\"",
unquoteResults=True,
multiline=True
),
pp.QuotedString(
"'",
unquoteResults=True,
multiline=True
),
]
)
v_naked_literal = pp.MatchFirst(
[
v_literal,
pp.Word("".join(i for i in pp.printables if i not in ",:\n@\'\""))
]
)
class Token:
"""
A token in the specification language. Tokens are immutable. The token
classes have no meaning in and of themselves, and are combined into
Components and Actions to build the language.
"""
__metaclass__ = abc.ABCMeta
@classmethod
def expr(cls): # pragma: no cover
"""
A parse expression.
"""
return None
@abc.abstractmethod
def spec(self): # pragma: no cover
"""
A parseable specification for this token.
"""
return None
@property
def unique_name(self) -> typing.Optional[str]:
"""
Controls uniqueness constraints for tokens. No two tokens with the
        same name will be allowed. If no uniqueness should be applied, this
should be None.
"""
return self.__class__.__name__.lower()
def resolve(self, settings_, msg_):
"""
Resolves this token to ready it for transmission. This means that
the calculated offsets of actions are fixed.
settings: a language.Settings instance
msg: The containing message
"""
return self
def __repr__(self):
return self.spec()
class _TokValueLiteral(Token):
def __init__(self, val):
self.val = strutils.escaped_str_to_bytes(val)
def get_generator(self, settings_):
return self.val
def freeze(self, settings_):
return self
class TokValueLiteral(_TokValueLiteral):
"""
A literal with Python-style string escaping
"""
@classmethod
def expr(cls):
e = v_literal.copy()
return e.setParseAction(cls.parseAction)
@classmethod
def parseAction(cls, x):
v = cls(*x)
return v
def spec(self):
inner = strutils.bytes_to_escaped_str(self.val)
inner = inner.replace(r"'", r"\x27")
return "'" + inner + "'"
class TokValueNakedLiteral(_TokValueLiteral):
@classmethod
def expr(cls):
e = v_naked_literal.copy()
return e.setParseAction(lambda x: cls(*x))
def spec(self):
return strutils.bytes_to_escaped_str(self.val, escape_single_quotes=True)
class TokValueGenerate(Token):
def __init__(self, usize, unit, datatype):
if not unit:
unit = "b"
self.usize, self.unit, self.datatype = usize, unit, datatype
def bytes(self):
return self.usize * human.SIZE_UNITS[self.unit]
def get_generator(self, settings_):
return generators.RandomGenerator(self.datatype, self.bytes())
def freeze(self, settings):
g = self.get_generator(settings)
return TokValueLiteral(strutils.bytes_to_escaped_str(g[:], escape_single_quotes=True))
@classmethod
def expr(cls):
e = pp.Literal("@").suppress() + v_integer
u = functools.reduce(
operator.or_,
[pp.Literal(i) for i in human.SIZE_UNITS.keys()]
).leaveWhitespace()
e = e + pp.Optional(u, default=None)
s = pp.Literal(",").suppress()
s += functools.reduce(
operator.or_,
[pp.Literal(i) for i in generators.DATATYPES.keys()]
)
e += pp.Optional(s, default="bytes")
return e.setParseAction(lambda x: cls(*x))
def spec(self):
s = "@%s" % self.usize
if self.unit != "b":
s += self.unit
if self.datatype != "bytes":
s += ",%s" % self.datatype
return s
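# Illustrative generator specs for the class above (assuming "k" is a registered size
# unit in human.SIZE_UNITS and "ascii" a registered datatype in generators.DATATYPES):
#   "@100"       -> 100 bytes of random data
#   "@2k,ascii"  -> 2 kilobytes of random ASCII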
class TokValueFile(Token):
def __init__(self, path):
self.path = str(path)
@classmethod
def expr(cls):
e = pp.Literal("<").suppress()
e = e + v_naked_literal
return e.setParseAction(lambda x: cls(*x))
def freeze(self, settings_):
return self
def get_generator(self, settings):
if not settings.staticdir:
raise exceptions.FileAccessDenied("File access disabled.")
s = os.path.expanduser(self.path)
s = os.path.normpath(
os.path.abspath(os.path.join(settings.staticdir, s))
)
uf = settings.unconstrained_file_access
if not uf and not s.startswith(os.path.normpath(settings.staticdir)):
raise exceptions.FileAccessDenied(
"File access outside of configured directory"
)
if not os.path.isfile(s):
raise exceptions.FileAccessDenied("File not readable")
return generators.FileGenerator(s)
def spec(self):
return "<'%s'" % self.path
TokValue = pp.MatchFirst(
[
TokValueGenerate.expr(),
TokValueFile.expr(),
TokValueLiteral.expr()
]
)
TokNakedValue = pp.MatchFirst(
[
TokValueGenerate.expr(),
TokValueFile.expr(),
TokValueLiteral.expr(),
TokValueNakedLiteral.expr(),
]
)
TokOffset = pp.MatchFirst(
[
v_integer,
pp.Literal("r"),
pp.Literal("a")
]
)
class _Component(Token):
"""
    A value component of the primary specification of a message.
Components produce byte values describing the bytes of the message.
"""
def values(self, settings): # pragma: no cover
"""
A sequence of values, which can either be strings or generators.
"""
pass
def string(self, settings=None):
"""
A bytestring representation of the object.
"""
return b"".join(i[:] for i in self.values(settings or {}))
class KeyValue(_Component):
"""
A key/value pair.
cls.preamble: leader
"""
def __init__(self, key, value):
self.key, self.value = key, value
@classmethod
def expr(cls):
e = pp.Literal(cls.preamble).suppress()
e += TokValue
e += pp.Literal("=").suppress()
e += TokValue
return e.setParseAction(lambda x: cls(*x))
def spec(self):
return "%s%s=%s" % (self.preamble, self.key.spec(), self.value.spec())
def freeze(self, settings):
return self.__class__(
self.key.freeze(settings), self.value.freeze(settings)
)
class CaselessLiteral(_Component):
"""
A caseless token that can take only one value.
"""
def __init__(self, value):
self.value = value
@classmethod
def expr(cls):
spec = pp.CaselessLiteral(cls.TOK)
spec = spec.setParseAction(lambda x: cls(*x))
return spec
def values(self, settings):
return self.TOK
def spec(self):
return self.TOK
def freeze(self, settings_):
return self
class OptionsOrValue(_Component):
"""
Can be any of a specified set of options, or a value specifier.
"""
preamble = ""
options = [] # type: typing.List[str]
def __init__(self, value):
# If it's a string, we were passed one of the options, so we lower-case
# it to be canonical. The user can specify a different case by using a
# string value literal.
self.option_used = False
if isinstance(value, str):
for i in self.options:
# Find the exact option value in a case-insensitive way
if i.lower() == value.lower():
self.option_used = True
value = TokValueLiteral(i)
break
self.value = value
@classmethod
def expr(cls):
parts = [pp.CaselessLiteral(i) for i in cls.options]
m = pp.MatchFirst(parts)
spec = m | TokValue.copy()
spec = spec.setParseAction(lambda x: cls(*x))
if cls.preamble:
spec = pp.Literal(cls.preamble).suppress() + spec
return spec
def values(self, settings):
return [
self.value.get_generator(settings)
]
def spec(self):
s = self.value.spec()
if s[1:-1].lower() in self.options:
s = s[1:-1].lower()
return "%s%s" % (self.preamble, s)
def freeze(self, settings):
return self.__class__(self.value.freeze(settings))
class Integer(_Component):
bounds = (None, None) # type: typing.Tuple[typing.Optional[int], typing.Optional[int]]
preamble = ""
def __init__(self, value):
v = int(value)
outofbounds = any([
self.bounds[0] is not None and v < self.bounds[0],
self.bounds[1] is not None and v > self.bounds[1]
])
if outofbounds:
raise exceptions.ParseException(
"Integer value must be between %s and %s." % self.bounds,
0, 0
)
self.value = str(value).encode()
@classmethod
def expr(cls):
e = v_integer.copy()
if cls.preamble:
e = pp.Literal(cls.preamble).suppress() + e
return e.setParseAction(lambda x: cls(*x))
def values(self, settings):
return [self.value]
def spec(self):
return "%s%s" % (self.preamble, self.value.decode())
def freeze(self, settings_):
return self
class Value(_Component):
"""
    A value component led by an optional preamble.
"""
preamble = ""
def __init__(self, value):
self.value = value
@classmethod
def expr(cls):
e = (TokValue | TokNakedValue)
if cls.preamble:
e = pp.Literal(cls.preamble).suppress() + e
return e.setParseAction(lambda x: cls(*x))
def values(self, settings):
return [self.value.get_generator(settings)]
def spec(self):
return "%s%s" % (self.preamble, self.value.spec())
def freeze(self, settings):
return self.__class__(self.value.freeze(settings))
class FixedLengthValue(Value):
"""
    A value component of fixed length, led by an optional preamble.
"""
preamble = ""
length = None # type: typing.Optional[int]
def __init__(self, value):
Value.__init__(self, value)
lenguess = None
try:
lenguess = len(value.get_generator(Settings()))
except exceptions.RenderError:
pass
# This check will fail if we know the length upfront
if lenguess is not None and lenguess != self.length:
raise exceptions.RenderError(
"Invalid value length: '%s' is %s bytes, should be %s." % (
self.spec(),
lenguess,
self.length
)
)
def values(self, settings):
ret = Value.values(self, settings)
l = sum(len(i) for i in ret)
# This check will fail if we don't know the length upfront - i.e. for
# file inputs
if l != self.length:
raise exceptions.RenderError(
"Invalid value length: '%s' is %s bytes, should be %s." % (
self.spec(),
l,
self.length
)
)
return ret
class Boolean(_Component):
"""
A boolean flag.
name = true
-name = false
"""
name = ""
def __init__(self, value):
self.value = value
@classmethod
def expr(cls):
e = pp.Optional(pp.Literal("-"), default=True)
e += pp.Literal(cls.name).suppress()
def parse(s_, loc_, toks):
val = True
if toks[0] == "-":
val = False
return cls(val)
return e.setParseAction(parse)
def spec(self):
return "%s%s" % ("-" if not self.value else "", self.name)
class IntField(_Component):
"""
    An integer field, where values can optionally be specified by name.
"""
names = {} # type: typing.Dict[str, int]
max = 16
preamble = ""
def __init__(self, value):
self.origvalue = value
self.value = self.names.get(value, value)
if self.value > self.max:
raise exceptions.ParseException(
"Value can't exceed %s" % self.max, 0, 0
)
@classmethod
def expr(cls):
parts = [pp.CaselessLiteral(i) for i in cls.names.keys()]
m = pp.MatchFirst(parts)
spec = m | v_integer.copy()
spec = spec.setParseAction(lambda x: cls(*x))
if cls.preamble:
spec = pp.Literal(cls.preamble).suppress() + spec
return spec
def values(self, settings):
return [str(self.value)]
def spec(self):
return "%s%s" % (self.preamble, self.origvalue)
| MatthewShao/mitmproxy | pathod/language/base.py | Python | mit | 13,851 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
        # a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
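        # (relayfee is quoted in BTC per kilobyte, so dividing by 1000 gives BTC per byte)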
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
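        # (4 + 1 + 36 = 41 bytes = 82 hex chars, so characters 82:84 hold the 1-byte script
        # length; splicing in "0100" replaces the empty scriptSig with a single 0x00 byte)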
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 1.2 BTC to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
        # output 0 takes at least as much share of the fee as outputs 2 and 3, and no more than 2 satoshis more
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| laudaa/bitcoin | test/functional/fundrawtransaction.py | Python | mit | 32,075 |
# -*- coding: utf-8 -*-
"""
pygments.styles.borland
~~~~~~~~~~~~~~~~~~~~~~~
Style similar to the style used in the Borland IDEs.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class BorlandStyle(Style):
"""
    Style similar to the style used in the Borland IDEs.
"""
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #008800',
Comment.Preproc: 'noitalic #008080',
Comment.Special: 'noitalic bold',
String: '#0000FF',
String.Char: '#800080',
Number: '#0000FF',
Keyword: 'bold #000080',
Operator.Word: 'bold',
Name.Tag: 'bold #000080',
Name.Attribute: '#FF0000',
Generic.Heading: '#999999',
Generic.Subheading: '#aaaaaa',
Generic.Deleted: 'bg:#ffdddd #000000',
Generic.Inserted: 'bg:#ddffdd #000000',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
| ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/tools/Editra/src/extern/pygments/styles/borland.py | Python | mit | 1,613 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
from django.conf.urls import *
from omeroweb.webstart import views
urlpatterns = patterns('django.views.generic.simple',
url( r'^$', views.index, name="webstart_index" ),
url( r'^jars/insight\.jnlp$', views.insight, name='webstart_insight'),
)
| jballanc/openmicroscopy | components/tools/OmeroWeb/omeroweb/webstart/urls.py | Python | gpl-2.0 | 1,117 |
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
All the little functions that make life nicer in the Traits package.
.. moduleauthor:: Mihai Andrei <[email protected]>
.. moduleauthor:: Lia Domide <[email protected]>
.. moduleauthor:: marmaduke <[email protected]>
"""
import numpy
import collections
import inspect
from tvb.basic.profile import TvbProfile
# returns true if key is, by convention, public
ispublic = lambda key: key[0] != '_'
def str_class_name(thing, short_form=False):
"""
A helper function that tries to generate an informative name for its
argument: when passed a class, return its name, when passed an object
return a string representation of that value.
"""
# if thing is a class, it has attribute __name__
if hasattr(thing, '__name__'):
cls = thing
if short_form:
return cls.__name__
return cls.__module__ + '.' + cls.__name__
else:
# otherwise, it's an object and we return its __str__
return str(thing)
def get(obj, key, default=None):
"""
get() is a general function allowing us to ignore whether we are
getting from a dictionary or object. If obj is a dictionary, we
return the value corresponding to key, otherwise we return the
attribute on obj corresponding to key. In both cases, if key
does not exist, default is returned.
"""
if type(obj) is dict:
return obj.get(key, default)
else:
return getattr(obj, key) if hasattr(obj, key) else default
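# A small illustration of get() with hypothetical values (not part of the
# original module):
#   get({'a': 1}, 'a')          -> 1   (dictionary lookup)
#   get({'a': 1}, 'b', 0)       -> 0   (missing key, default returned)
#   get(obj, 'name', 'unnamed') -> obj.name if the attribute exists, else 'unnamed'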
def log_debug_array(log, array, array_name, owner=""):
"""
Simple access to debugging info on an array.
"""
if TvbProfile.current.TRAITS_CONFIGURATION.use_storage:
return
    # Hide these logs in web-mode with storage, because we have multiple storage exceptions
if owner != "":
name = ".".join((owner, array_name))
else:
name = array_name
if array is not None and hasattr(array, 'shape'):
shape = str(array.shape)
dtype = str(array.dtype)
has_nan = str(numpy.isnan(array).any())
array_max = str(array.max())
array_min = str(array.min())
log.debug("%s shape: %s" % (name, shape))
log.debug("%s dtype: %s" % (name, dtype))
log.debug("%s has NaN: %s" % (name, has_nan))
log.debug("%s maximum: %s" % (name, array_max))
log.debug("%s minimum: %s" % (name, array_min))
else:
log.debug("%s is None or not Array" % name)
Args = collections.namedtuple('Args', 'pos kwd')
class TypeRegister(list):
"""
TypeRegister is a smart list that can be queried to obtain selections of the
classes inheriting from Traits classes.
"""
def subclasses(self, obj, avoid_subclasses=False):
"""
        The subclasses method takes a class (or an instance, in which case the
        class of the instance is used) and returns a list of all options known to
this TypeRegister that are direct subclasses of the class or have the
class in their base class list.
:param obj: Class or instance
:param avoid_subclasses: When specified, subclasses are not retrieved, only current class.
"""
cls = obj if inspect.isclass(obj) else obj.__class__
if avoid_subclasses:
return [cls]
if hasattr(cls, '_base_classes'):
bases = cls._base_classes
else:
bases = []
        subclasses = [opt for opt in self if ((issubclass(opt, cls) or cls in opt.__bases__)
                                              and not inspect.isabstract(opt) and opt.__name__ not in bases)]
        return subclasses
def multiline_math_directives_to_matjax(doc):
"""
    Looks for multi-line sphinx math directives in the given rst string.
    It converts them into html text that will be interpreted by mathjax.
    The parsing is simplistic, not a rst parser.
    Wraps a .. math:: body in \[\begin{split} ... \end{split}\]
"""
# doc = text | math
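    # Illustrative sketch (made-up input; only the directive handling mirrors
    # the code below): a block such as
    #     .. math::
    #        E = mc^2
    # is emitted as
    #     \[\begin{split}
    #        E = mc^2
    #     \end{split}\]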
BEGIN = r'\[\begin{split}'
END = r'\end{split}\]'
in_math = False # 2 state parser
out_lines = []
indent = ''
for line in doc.splitlines():
if not in_math:
# math = indent directive math_body
indent, sep, _ = line.partition('.. math::')
if sep:
out_lines.append(BEGIN)
in_math = True
else:
out_lines.append(line)
else:
# math body is at least 1 space more indented than the directive, but we tolerate empty lines
if line.startswith(indent + ' ') or line.strip() == '':
out_lines.append(line)
else:
# this line is not properly indented, math block is over
out_lines.append(END)
out_lines.append(line)
in_math = False
if in_math:
# close math tag
out_lines.append(END)
return '\n'.join(out_lines) | echohenry2006/tvb-library | tvb/basic/traits/util.py | Python | gpl-2.0 | 6,354 |
#coding: utf-8
import os
import time
from . import test_util
def test_add_file():
test_util.mkfile(1, 'a.md', 'add a file')
test_util.verify_result()
def test_add_file_t():
test_util.mkfile(2, 'l/m/n/test.md', 'add l/m/n/test.md')
test_util.verify_result()
def test_add_dir():
test_util.mkdir(1, 'ad')
test_util.verify_result()
def test_add_dir_t():
test_util.mkdir(2, 'tt/ee/st')
test_util.verify_result()
def test_modify_file():
test_util.modfile(1, 'a.md', 'modify a.md')
test_util.verify_result()
def test_rm_file():
test_util.rmfile(1, 'a.md')
test_util.verify_result()
def test_rm_dir():
test_util.rmdir(1, 'ad')
test_util.verify_result()
def test_rename_file():
test_util.mkfile(2, 'b.md', 'add b.md')
time.sleep(1)
test_util.move(2, 'b.md', 'b_bak.md')
test_util.verify_result()
def test_rename_dir():
test_util.mkdir(2, 'ab')
time.sleep(1)
test_util.move(2, 'ab', 'ab_bak')
test_util.verify_result()
def test_each():
test_util.mkdir(1, 'abc1')
test_util.mkfile(1, 'abc1/c.md', 'add abc1/c.md')
time.sleep(1)
test_util.mkdir(2, 'bcd1')
test_util.mkfile(2, 'bcd1/d.md', 'add bcd1/d.md')
test_util.verify_result()
def test_unsync_resync():
test_util.desync_cli1()
test_util.rmdir(1, 'abc1')
test_util.modfile(1, 'bcd1/d.md', 'modify bcd1/d.md to test unsync resync')
test_util.sync_cli1()
test_util.verify_result()
if not os.path.exists(test_util.getpath(1, 'abc1')):
assert False, 'dir abc1 should be recreated when resync'
if len(os.listdir(test_util.getpath(1, 'bcd1'))) != 2:
assert False, 'should generate conflict file for bcd1/d.md when resync'
def test_modify_timestamp():
test_util.touch(1, 'bcd1/d.md')
test_util.verify_result()
| zhengger/seafile | tests/sync-auto-test/test_cases/test_simple.py | Python | gpl-2.0 | 1,826 |
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Import and export of snippets.
"""
import os
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QMessageBox, QTreeWidget, QTreeWidgetItem
import app
import appinfo
import qutil
import userguide
import widgets.dialog
from . import model
from . import snippets
from . import builtin
def save(names, filename):
"""Saves the named snippets to a file."""
root = ET.Element('snippets')
root.text = '\n\n'
root.tail = '\n'
d = ET.ElementTree(root)
comment = ET.Comment(_comment.format(appinfo=appinfo))
comment.tail = '\n\n'
root.append(comment)
for name in names:
snippet = ET.Element('snippet')
snippet.set('id', name)
snippet.text = '\n'
snippet.tail = '\n\n'
title = ET.Element('title')
title.text = snippets.title(name, False)
title.tail = '\n'
shortcuts = ET.Element('shortcuts')
ss = model.shortcuts(name)
if ss:
shortcuts.text = '\n'
for s in ss:
shortcut = ET.Element('shortcut')
shortcut.text = s.toString()
shortcut.tail = '\n'
shortcuts.append(shortcut)
shortcuts.tail = '\n'
body = ET.Element('body')
body.text = snippets.text(name)
body.tail = '\n'
snippet.append(title)
snippet.append(shortcuts)
snippet.append(body)
root.append(snippet)
d.write(filename, "UTF-8")
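# For orientation, a single exported snippet roughly takes this shape in the
# resulting file (a sketch: the id and text values are invented, only the
# element names come from save() above):
#
#   <snippet id="n123456">
#   <title>Example snippet</title>
#   <shortcuts>
#   <shortcut>Ctrl+Shift+T</shortcut>
#   </shortcuts>
#   <body>snippet text</body>
#   </snippet>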
def load(filename, widget):
"""Loads snippets from a file, displaying them in a list.
The user can then choose:
- overwrite builtin snippets or not
- overwrite own snippets with same title or not
- select and view snippets contents.
"""
try:
d = ET.parse(filename)
elements = list(d.findall('snippet'))
if not elements:
raise ValueError(_("No snippets found."))
except Exception as e:
QMessageBox.critical(widget, app.caption(_("Error")),
_("Can't read from source:\n\n{url}\n\n{error}").format(
url=filename, error=e))
return
dlg = widgets.dialog.Dialog(widget)
dlg.setWindowModality(Qt.WindowModal)
dlg.setWindowTitle(app.caption(_("dialog title", "Import Snippets")))
tree = QTreeWidget(headerHidden=True, rootIsDecorated=False)
dlg.setMainWidget(tree)
userguide.addButton(dlg.buttonBox(), "snippet_import_export")
allnames = frozenset(snippets.names())
builtins = frozenset(builtin.builtin_snippets)
titles = dict((snippets.title(n), n) for n in allnames if n not in builtins)
new = QTreeWidgetItem(tree, [_("New Snippets")])
updated = QTreeWidgetItem(tree, [_("Updated Snippets")])
unchanged = QTreeWidgetItem(tree, [_("Unchanged Snippets")])
new.setFlags(Qt.ItemIsEnabled)
updated.setFlags(Qt.ItemIsEnabled)
unchanged.setFlags(Qt.ItemIsEnabled)
new.setExpanded(True)
updated.setExpanded(True)
items = []
for snip in elements:
item = QTreeWidgetItem()
item.body = snip.find('body').text
item.title = snip.find('title').text
item.shortcuts = list(e.text for e in snip.findall('shortcuts/shortcut'))
title = item.title or snippets.maketitle(snippets.parse(item.body).text)
item.setText(0, title)
name = snip.get('id')
name = name if name in builtins else None
# determine if new, updated or unchanged
if not name:
name = titles.get(title)
item.name = name
if not name or name not in allnames:
new.addChild(item)
items.append(item)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
item.setCheckState(0, Qt.Checked)
elif name:
if (item.body != snippets.text(name)
or title != snippets.title(name)
or (item.shortcuts and item.shortcuts !=
[s.toString() for s in model.shortcuts(name) or ()])):
updated.addChild(item)
items.append(item)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
item.setCheckState(0, Qt.Checked)
else:
unchanged.addChild(item)
item.setFlags(Qt.ItemIsEnabled)
# count:
for i in new, updated, unchanged:
i.setText(0, i.text(0) + " ({0})".format(i.childCount()))
for i in new, updated:
if i.childCount():
i.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
i.setCheckState(0, Qt.Checked)
def changed(item):
if item in (new, updated):
for i in range(item.childCount()):
c = item.child(i)
c.setCheckState(0, item.checkState(0))
tree.itemChanged.connect(changed)
importShortcuts = QTreeWidgetItem([_("Import Keyboard Shortcuts")])
if items:
tree.addTopLevelItem(importShortcuts)
importShortcuts.setFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
importShortcuts.setCheckState(0, Qt.Checked)
dlg.setMessage(_("Choose which snippets you want to import:"))
else:
dlg.setMessage(_("There are no new or updated snippets in the file."))
unchanged.setExpanded(True)
tree.setWhatsThis(_(
"<p>Here the snippets from {filename} are displayed.</p>\n"
"<p>If there are new or updated snippets, you can select or deselect "
"them one by one, or all at once, using the checkbox of the group. "
"Then click OK to import all the selected snippets.</p>\n"
"<p>Existing, unchanged snippets can't be imported.</p>\n"
).format(filename=os.path.basename(filename)))
qutil.saveDialogSize(dlg, "snippettool/import/size", QSize(400, 300))
if not dlg.exec_() or not items:
return
ac = model.collection()
m = model.model()
with qutil.busyCursor():
for i in items:
if i.checkState(0) == Qt.Checked:
index = m.saveSnippet(i.name, i.body, i.title)
if i.shortcuts and importShortcuts.checkState(0):
shortcuts = list(map(QKeySequence.fromString, i.shortcuts))
ac.setShortcuts(m.name(index), shortcuts)
widget.updateColumnSizes()
_comment = """
Created by {appinfo.appname} {appinfo.version}.
Every snippet is represented by:
title: title text
shortcuts: list of shortcut elements, every shortcut is a key sequence
body: the snippet text
The snippet id attribute can be the name of a builtin snippet or a random
name like 'n123456'. In the latter case, the title is used to determine
whether a snippet is new or updated.
"""
| wbsoft/frescobaldi | frescobaldi_app/snippet/import_export.py | Python | gpl-2.0 | 7,763 |
##########################################################################
# Author: Jane Curry, [email protected]
# Date: April 19th, 2011
# Revised:
#
# interfaces.py for Predictive Threshold ZenPack
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""interfaces.py
Representation of Predictive Threshold components.
$Id: info.py,v 1.2 2010/12/14 20:45:46 jc Exp $"""
__version__ = "$Revision: 1.4 $"[11:-2]
from Products.Zuul.interfaces import IInfo, IFacade
from Products.Zuul.interfaces.template import IThresholdInfo
from Products.Zuul.form import schema
from Products.Zuul.utils import ZuulMessageFactory as _t
class IPredThresholdInfo(IThresholdInfo):
"""
Interfaces for Predictive Threshold
"""
# pointval = schema.List(title=_t(u"Data Point"), xtype='datapointitemselector', order=6)
escalateCount = schema.Int(title=_t(u'Escalate Count'), order=9)
alpha = schema.Text(title=_t(u'Alpha'), order=10)
beta = schema.Text(title=_t(u'Beta'), order=11)
gamma = schema.Text(title=_t(u'Gamma'), order=12)
rows = schema.Text(title=_t(u'Rows'), order=13)
season = schema.Text(title=_t(u'Season'), order=14)
window = schema.Text(title=_t(u'Window'), order=15)
threshold = schema.Text(title=_t(u'Threshold'), order=16)
delta = schema.Text(title=_t(u'Delta'), order=17)
predcolor = schema.Text(title=_t(u'Prediction Color'), order=18)
cbcolor = schema.Text(title=_t(u'Confidence Band Color'), order=19)
tkcolor = schema.Text(title=_t(u'Tick Color'), order=20)
# pointval = schema.List(title=_t(u"Data Point"), xtype='datapointitemselector')
# escalateCount = schema.Int(title=_t(u'Escalate Count'))
# alpha = schema.Text(title=_t(u'Alpha'))
# beta = schema.Text(title=_t(u'Beta'))
# gamma = schema.Text(title=_t(u'Gamma'))
# rows = schema.Text(title=_t(u'Rows'))
# season = schema.Text(title=_t(u'Season'))
# window = schema.Text(title=_t(u'Window'))
# threshold = schema.Text(title=_t(u'Threshold'))
# delta = schema.Text(title=_t(u'Delta'))
# predcolor = schema.Text(title=_t(u'Prediction Color'))
# cbcolor = schema.Text(title=_t(u'Confidence Band Color'))
# tkcolor = schema.Text(title=_t(u'Tick Color'))
| jcurry/ZenPacks.community.PredictiveThreshold | ZenPacks/community/PredictiveThreshold/interfaces.py | Python | gpl-2.0 | 2,440 |
# Counts the "x" and "o" characters in the input string and prints "true"
# when the counts match, "false" otherwise.
def ExOh(text):
    xcount = 0
    ocount = 0
    for c in text:
        if c == "x":
            xcount += 1
        if c == "o":
            ocount += 1
    if xcount == ocount:
        print "true"
    else:
        print "false"
ExOh(raw_input()) | ohgodscience/Python | Exercises/ExOh.py | Python | gpl-2.0 | 230 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# StartOS Device Manager(ydm).
# Copyright (C) 2011 ivali, Inc.
# hechao <[email protected]>, 2011.
__author__="hechao"
__date__ ="$2011-12-20 16:36:20$"
import gc
from xml.parsers import expat
from hwclass import *
class Device:
def __init__(self, dev_xml):
self.description = ''
self.product = ''
self.vendor = ''
self.version = ''
self.businfo = ''
self.logicalname = ''
self.date = ''
self.serial = ''
self.capacity = ''
self.width = ''
self.clock = ''
self.slot = ''
self.size = ''
self.config = {}
self.capability = []
self.attr = {}
self.dev_type = {}
self.pcid = {}
self._parser = expat.ParserCreate()
self._parser.buffer_size = 102400
self._parser.StartElementHandler = self.start_handler
self._parser.CharacterDataHandler = self.data_handler
self._parser.EndElementHandler = self.end_handler
self._parser.returns_unicode = False
fd = file(dev_xml)
self._parser.ParseFile(fd)
fd.close()
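    # Sketch of the XML layout this parser is assumed to walk (values invented
    # for illustration; the element and attribute names follow the handlers
    # below):
    #   <node id="cpu:0" class="processor">
    #     <description>CPU</description>
    #     <setting id="cores" value="4"/>
    #     <capability id="x86-64"/>
    #   </node>
    # start_handler() records the node attributes plus <setting>/<capability>
    # entries, data_handler() fills the plain text fields (description,
    # product, vendor, ...), and end_handler() turns each finished node into
    # one of the hwclass objects and files it under self.dev_type.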
def start_handler(self, tag, attrs):
self.flag = tag
if tag == "node":
self.attr = attrs
elif tag == "setting":
self.config.setdefault(attrs["id"], attrs["value"])
elif tag == "capability":
self.capability.append(attrs["id"])
def data_handler(self, data):
if(data == '\n'):
return
if(data.isspace()):
return
if self.flag == "description":
self.description = data.strip()
elif self.flag == "product":
self.product = data.strip()
elif self.flag == "vendor":
self.vendor = data.strip()
elif self.flag == "businfo":
self.businfo = data.strip()
elif self.flag == "logicalname":
self.logicalname = data.strip()
elif self.flag == "version":
self.version = data.strip()
elif self.flag == "date":
self.date = data.strip()
elif self.flag == "serial":
self.serial = data.strip()
elif self.flag == "capacity":
self.capacity = data.strip()
elif self.flag == "width":
self.width = data.strip()
elif self.flag == "clock":
self.clock = data.strip()
elif self.flag == "slot":
self.slot = data.strip()
elif self.flag == "size":
self.size = data.strip()
def end_handler(self, tag):
if tag == "node":
if self.attr["class"] == "system":
system = System(self.description, self.product, self.vendor, self.version, \
self.serial, self.width, self.config, self.capability)
self.dev_type.setdefault((0, "system"), []).append(system)
elif self.attr["id"].split(":")[0] == "cpu" and self.attr["class"] == "processor":
cpu = Cpu(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.slot, self.size, self.capacity, self.width, self.clock, self.config, self.capability)
self.dev_type.setdefault((1, "cpu"), []).append(cpu)
elif self.attr["id"].split(":")[0] == "cache" and self.attr["class"] == "memory":
cache = Cache(self.description, self.product, self.vendor, self.version, self.slot, self.size)
self.dev_type.setdefault((1, "cpu"), []).append(cache)
elif (self.attr["id"] == "core" or self.attr["id"] == "board") and self.attr["class"] == "bus":
motherboard = Motherboard(self.description, self.product, self.vendor, self.version, self.serial)
self.dev_type.setdefault((2, "motherboard"), []).append(motherboard)
elif self.attr["id"] == "firmware" and self.attr["class"] == "memory":
bios = Bios(self.description, self.product, self.vendor, self.version, \
self.date, self.size, self.capability)
self.dev_type.setdefault((2, "motherboard"), []).append(bios)
elif self.attr["id"].split(":")[0] == "memory" and self.attr["class"] == "memory":
memory = Memory(self.description, self.product, self.vendor, self.version, \
self.slot, self.size)
self.dev_type.setdefault((3, "memory"), []).append(memory)
elif self.attr["id"].split(":")[0] == "bank" and self.attr["class"] == "memory":
bank = Bank(self.description, self.product, self.vendor, self.version, \
self.serial, self.slot, self.size, self.width, self.clock)
self.dev_type.setdefault((3, "memory"), []).append(bank)
elif self.attr["id"].split(":")[0] == "display" and self.attr["class"] == "display":
display = Display(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((4, "display"), []).append(display)
self.pcid[display.pcid] = "display"
if get_monitor():
monitor = Monitor("", "", "", "")
self.dev_type.setdefault((5, "monitor"), [monitor])#.append(monitor)
elif self.attr["id"].split(":")[0] == "disk" and self.attr["class"] == "disk":
disk = Disk(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.size, self.config, self.capability)
self.dev_type.setdefault((6, "disk"), []).append(disk)
elif self.attr["id"].split(":")[0] == "cdrom" and self.attr["class"] == "disk":
cdrom = Cdrom(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.config, self.capability)
self.dev_type.setdefault((7, "cdrom"), []).append(cdrom)
elif self.attr["class"] == "storage" and self.attr["handle"]:
storage = Storage(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.config, self.capability)
self.dev_type.setdefault((8, "storage"), []).append(storage)
elif (self.attr["class"] == "network") or (self.attr["id"].split(":")[0] == "bridge" \
and self.attr["class"] == "bridge"):
network = Network(self.description, self.product, self.vendor, self.version, \
self.businfo, self.logicalname, self.serial, self.capacity, self.config, self.capability)
self.dev_type.setdefault((9, "network"), []).append(network)
self.pcid[network.pcid] = "network"
elif self.attr["class"] == "multimedia":
media = Multimedia(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((10, "multimedia"), []).append(media)
self.pcid[media.pcid] = "multimedia"
elif self.attr["class"] == "input":
imput = Imput(self.description, self.product, self.vendor, self.version, \
self.businfo, self.config, self.capability)
self.dev_type.setdefault((11, "input"), []).append(imput)
self.pcid[imput.pcid] = "input"
elif self.attr["id"].split(":")[0] != "generic" and self.attr["class"] == "generic":
generic = Generic(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.config, self.capability)
self.dev_type.setdefault((12, "generic"), []).append(generic)
self.pcid[generic.pcid] = "generic"
elif self.attr["id"].split(":")[0] != "communication" and self.attr["class"] == "communication":
modem = Modem(self.description, self.product, self.vendor, self.version, \
self.businfo, self.serial, self.config, self.capability)
self.dev_type.setdefault((12, "generic"), []).append(modem)
elif self.attr["id"].split(":")[0] == "battery" and self.attr["class"] == "power":
power = Power(self.description, self.product, self.vendor, self.version, \
self.slot, self.capacity, self.config)
self.dev_type.setdefault((12, "generic"), []).append(power)
self.clear()
def clear(self):
self.description = ''
self.product = ''
self.vendor = ''
self.version = ''
self.businfo = ''
self.logicalname = ''
self.date = ''
self.serial = ''
self.capacity = ''
self.width = ''
self.clock = ''
self.slot = ''
self.size = ''
self.config = {}
self.capability = []
self.attr = {}
def close(self):
del self._parser
gc.collect()
| jun-zhang/device-manager | src/lib/ydevicemanager/devices.py | Python | gpl-2.0 | 8,950 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitoring', '0002_monitoring_update'),
]
operations = [
migrations.RemoveField(
model_name='requestevent',
name='resources',
),
migrations.AddField(
model_name='requestevent',
name='resources',
field=models.ManyToManyField(help_text='List of resources affected', to='monitoring.MonitoredResource', null=True, blank=True),
),
]
| tomkralidis/geonode | geonode/monitoring/migrations/0003_monitoring_resources.py | Python | gpl-3.0 | 572 |
# encoding: utf-8
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Application.content_type'
db.add_column('applications_application', 'content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True), keep_default=False)
# Adding field 'Application.object_id'
db.add_column('applications_application', 'object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True), keep_default=False)
# Deleting field 'UserApplication.object_id'
db.delete_column('applications_userapplication', 'object_id')
# Deleting field 'UserApplication.content_type'
db.delete_column('applications_userapplication', 'content_type_id')
def backwards(self, orm):
# Deleting field 'Application.content_type'
db.delete_column('applications_application', 'content_type_id')
# Deleting field 'Application.object_id'
db.delete_column('applications_application', 'object_id')
# Adding field 'UserApplication.object_id'
db.add_column('applications_userapplication', 'object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'UserApplication.content_type'
db.add_column('applications_userapplication', 'content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True, blank=True), keep_default=False)
models = {
'applications.applicant': {
'Meta': {'object_name': 'Applicant'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Institute']", 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '16', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'applications.application': {
'Meta': {'object_name': 'Application'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'content_type_temp': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'app_temp_obj'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Person']", 'null': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {}),
'header_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'object_id_temp': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'secret_token': ('django.db.models.fields.CharField', [], {'default': "'f0369b28f1adc73f2c0c351ed377febb0fa872d4'", 'unique': 'True', 'max_length': '64'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
'submitted_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'applications.projectapplication': {
'Meta': {'object_name': 'ProjectApplication', '_ormbases': ['applications.Application']},
'additional_req': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'application_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['applications.Application']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Institute']"}),
'machine_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['machines.MachineCategory']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user_applications': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['applications.UserApplication']", 'null': 'True', 'blank': 'True'})
},
'applications.userapplication': {
'Meta': {'object_name': 'UserApplication', '_ormbases': ['applications.Application']},
'application_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['applications.Application']", 'unique': 'True', 'primary_key': 'True'}),
'make_leader': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'needs_account': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']", 'null': 'True', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'machines.machinecategory': {
'Meta': {'object_name': 'MachineCategory', 'db_table': "'machine_category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'people.institute': {
'Meta': {'object_name': 'Institute', 'db_table': "'institute'"},
'active_delegate': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'active_delegate'", 'null': 'True', 'to': "orm['people.Person']"}),
'delegate': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'delegate'", 'null': 'True', 'to': "orm['people.Person']"}),
'gid': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'sub_delegates': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sub_delegates'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['people.Person']"})
},
'people.person': {
'Meta': {'object_name': 'Person', 'db_table': "'person'"},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
'department': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Institute']"}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'supervisor': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'projects.project': {
'Meta': {'object_name': 'Project', 'db_table': "'project'"},
'additional_req': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_approver'", 'null': 'True', 'to': "orm['people.Person']"}),
'date_approved': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_deleted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_deletor'", 'null': 'True', 'to': "orm['people.Person']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'institute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Institute']"}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_expertise': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_usage': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'leaders': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'leaders'", 'symmetrical': 'False', 'to': "orm['people.Person']"}),
'machine_categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'projects'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['machines.MachineCategory']"}),
'machine_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['machines.MachineCategory']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['people.Person']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['applications']
| Karaage-Cluster/karaage-debian | karaage/legacy/applications/south_migrations/0004_auto__add_field_application_content_type__add_field_application_object.py | Python | gpl-3.0 | 17,096 |
#!/usr/bin/env python
#
# This file is part of the pebil project.
#
# Copyright (c) 2010, University of California Regents
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Counter for a folder
# /pmaclabs/icepic/ti10_round1_icepic_large_0256/processed_trace
# Executable:
# AvgCacheCalc.py
#
# This will calculate the Average of Cache hits for a series of processed metasim files.
#
#
# Usage:
#
# A number of arguments are needed. The arguments determine how to select the set of files to process
# and whether to compute the average across all files or not.
#
# Either sysid or taskid is required
# sysid - calculates a single average for all files with the same sysid
# - use with --sysid option to specify which sysid to use.
# - for file icepic_large_0256_0127.sysid44, the sysid is 44
#
# taskid - prints an average for each file with the same task id (i.e. 1 set of averages for each sysid found)
# - use with --taskid option to specify the task id
# - for file icepic_large_0256_0127.sysid44, the taskid is 0127
#
#
# app icepic,hycom,...
# dataset large, standard...
# cpu_count 256,1024,... input will be padded with 0s to 4 digits
# Optional:
# dir - the current dir is used if this argument is not given
# As an example take the folder:
# /pmaclabs/ti10/ti10_round1_icepic_large_0256/processed_trace
#
#
# SysID mode:
# mattl@trebek[21]$ ./AvgCacheCalc.py --app icepic --dataset large --cpu_count 1024 --sysid 99 --dir /pmaclabs/ti10/ti10_round1_icepic_large_1024/processed_trace/
#
# Reading files from: /pmaclabs/ti10/ti10_round1_icepic_large_1024/processed_trace/
# Averaging for all files like icepic_large_1024*.sysid99
# Number of files: 1024
# Cache 1 average %= 98.365459015 incl(98.365459015)
# Cache 2 average %= 0.000974823792366 incl(98.3654743948)
# Cache 3 average %= 0.0 incl(98.3654743948)
#
#
# TaskID:
# mattl@trebek[20]$ ./AvgCacheCalc.py --app icepic --dataset large --cpu_count 1024 --taskid 125 --dir /pmaclabs/ti10/ti10_round1_icepic_large_1024/processed_trace/
#
# Reading files from: /pmaclabs/ti10/ti10_round1_icepic_large_1024/processed_trace/
# Averaging for all files like icepic_large_1024_0125*
# Number of files: 32
# sysid0 99.5021899287
# sysid3 98.3544410843 98.4873748354
# sysid4 99.0521953314 99.0939555641
# sysid21 98.2867244765 98.496093132
# sysid22 98.8836107446 99.0731860899 99.5543906444
# sysid23 98.086753753 98.4952485239
# sysid44 98.8836107446 99.0772427056 99.5790751053
# sysid54 96.785672042 99.0781143074
# sysid64 98.3544410843 98.4789295449 98.4817196019
# sysid67 74.5078816751
# sysid68 23.7552154266
# sysid69 30.5848561276
# sysid70 33.5335710304
# sysid71 37.710498373
# sysid72 98.2910942185 98.2910942244 98.2910942244
# sysid73 98.3544410843 98.4789295449 98.49290069
# sysid74 98.3544410843 98.4789295449 98.4887431283
# sysid75 98.9182843857 99.0849451175 99.5487031836
# sysid77 98.086753753 98.4769519456 98.4956922971
# sysid78 98.9182843857 99.0849451175 99.1358601016
# sysid81 98.2910942185 98.2910942244 98.2910942244
# sysid82 98.2910942185 98.2910942244 98.2910942244
# sysid96 98.3544410843 98.4789295449 98.4928364694
# sysid97 98.3544410843 98.4789295449 98.492618417
# sysid98 98.2910942185 98.2910942244 98.2910942244
# sysid99 98.2910942185 98.2910942244 98.2910942244
# sysid100 98.3544410843 98.4789295449 98.4884141107
# sysid101 98.3544410843 98.4789295449 98.4884425654
# sysid102 98.2910942185 98.2910942244 98.2910942244
# sysid103 98.2910942185 98.2910942244 98.2910942244
# sysid104 98.086753753 98.4769519456 98.5007917366
# sysid105 98.086753753 98.4769519456 98.4966562518
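#
# Note on the hit-rate math used below (illustrative, with made-up numbers):
# each rate column in a processed trace line is an *inclusive* hit rate, i.e.
# the percentage of all memory accesses that hit at that cache level or an
# earlier one (so the L2 column already includes the L1 hits). The script
# converts these to exclusive per-level rates before weighting them by the
# block access counts. For example, with inclusive rates L1=90 and L2=95:
#   excl_L1 = 90
#   excl_L2 = 100 * (95 - 90) / (100 - 90) = 50
# i.e. half of the accesses that missed L1 are counted as L2 hits.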
import sys, os, glob, re
# function for getting average for a single file
# used by sysid argument
def getAvgSingle(fileNameGetAvg):
"""calcuates the average cache hits for a file"""
#print "TESTING:: file:", fileNameGetAvg
#this part is from counter_mikey.py
try:
traceFile = open(fileNameGetAvg, 'r')
fileLines = traceFile.readlines()
traceFile.close()
except IOError:
        print "Warning: file " + fileNameGetAvg + " not found"
exit()
#Process file
#count the lines, track each line in a list
everyLine = []
totalMisses = []
totalAccesses = []
myLine = fileLines[0].split()
cLevel = len(myLine)-3
#print "TESTING:: cLevel=", cLevel
for i in range(0,cLevel,1):
totalMisses.append(0)
totalAccesses.append(0)
if cLevel < 1 or cLevel > 3:
print "FATAL: Expected 1, 2 or 3 cache levels"
exit()
idx = 1
for myLine in fileLines:
# tokenize the line and verify that we get the correct number of tokens
myLine = myLine.split()
if cLevel != len(myLine)-3:
            print "FATAL: expected " + str(cLevel) + " hit rates on line " + str(idx)
exit()
# ascribe each token to an aptly-named variable
#blockid = long(myLine[0]) ###ASK MIKEY - needed?###
#fpCount = long(myLine[1]) ###ASK MIKEY - needed?###
memCount = long(myLine[2])
inclRate = []
for i in range(0,len(totalMisses),1):
inclRate.append(float(myLine[i+3]))
#print "myLine[", i+3, "]= ", myLine[i+3]
# convert each inclusive hit rate to an exclusive rate
exclRate = [inclRate[0]]
for i in range(1,len(inclRate),1):
thisRate = float(inclRate[i])
prevRate = float(inclRate[i-1])
if prevRate < 100.0:
exclRate.append(100.0*float(thisRate - prevRate)/(100.0 - prevRate))
else:
exclRate.append(float(0.0))
blockAccesses = []
blockMisses = []
blockAccesses.append(memCount)
## blockHits[n] stores the number of memory accesses that make it to cache level N
for i in range(0,len(totalMisses)-1,1):
blockMisses.append((blockAccesses[i]*(float(100.0)-exclRate[i]))/float(100.0))
# print "block L" + str(i+1) + " misses: " + str(blockMisses[i])
blockAccesses.append(blockMisses[i])
# print "block L" + str(i+2) + " accesses: " + str(blockAccesses[i+1])
blockMisses.append(blockAccesses[cLevel-1]*((100.0-exclRate[cLevel-1])/100.0))
# print "block L" + str(cLevel) + " misses: " + str(blockMisses[cLevel-1])
for i in range(0,len(totalMisses),1):
totalMisses[i] += blockMisses[i]
totalAccesses[i] += blockAccesses[i]
totalHits = 0
cacheValues = []
for i in range(0,len(totalMisses),1):
levelHits = (totalAccesses[i] - totalMisses[i])
totalHits += levelHits
#assign values to tuple and return
cacheValues.append((levelHits)/(totalAccesses[i])*100)
cacheValues.append(100*totalHits/totalAccesses[0])
#print "Cache " + str(i+1) + " average %= " + str((levelHits)/(totalAccesses[i])*100) + " incl(" + str(100*totalHits/totalAccesses[0]) + ")"
#print "cacheValues:", cacheValues
return cacheValues
# function for getting average for a single file
# used by taskid argument
# printType: 1 = taskid prints ex: taskid0001
# printType: 2 = sysid pritns sysid72
def printAvgSingle(fileNamepAvg, printType):
#print "Avg for file:", fileNamepAvg
fileidx = fileNamepAvg.rfind("/")
shortfileName = fileNamepAvg[fileidx+1:]
#print "TESTING: FileName:", shortfileName
# get the sysid# for printing later
try:
sysidname = shortfileName[shortfileName.index('.')+1:]
taskidname = shortfileName[shortfileName.index('.')-4:shortfileName.index('.')]
#print "TESTING: sysidname=", sysidname
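        # e.g. for a file named "icepic_large_0256_0127.sysid44" (the pattern
        # described in the usage notes above): sysidname -> "sysid44" and
        # taskidname -> "0127"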
except ValueError:
print "ERROR: Invalid filename no '.' found in filename:", shortfileName
exit()
except IndexError: #If file has '.' as last char, this could error
print "Error: Invalid location of . in filename. this shouldn't happen-", shortfileName
exit()
#lifted from counter_mikey.py
try:
traceFile = open(fileNamepAvg, 'r')
except IOError, NameError:
print "ERROR: can't find that file: " + fileNamepAvg
exit()
#Process file
#count the lines, track each line in a list
everyLine = []
fileLines = traceFile.readlines()
traceFile.close()
myLine = fileLines[0].split()
cLevel = len(myLine)-3
totalMisses = []
totalAccesses = []
for i in range(0,cLevel,1):
totalMisses.append(0)
totalAccesses.append(0)
####validate cLevel 4,5, or 6 is expected
#print "TESTING: This file has", cLevel, "cache level(s)"
##print "Eachline has", len(myLines), "columns"
if cLevel < 1 or cLevel > 3:
print "ERROR: Expected 1, 2, or 3 cache levels"
exit()
#### create if, else for cLevel = 4,5,6
idx = 1
for myLine in fileLines:
# tokenize the line and verify that we get the correct number of tokens
myLine = myLine.split()
if cLevel != len(myLine)-3:
print "ERROR: expected " + cLevel + " hit rates on line " + str(idx)
# ascribe each token to an aptly-named variable
blockid = long(myLine[0])
fpCount = long(myLine[1])
memCount = long(myLine[2])
inclRate = []
for i in range(0,len(totalMisses),1):
inclRate.append(float(myLine[i+3]))
# convert each inclusive hit rate to an exclusive rate
exclRate = [inclRate[0]]
for i in range(1,len(inclRate),1):
thisRate = float(inclRate[i])
prevRate = float(inclRate[i-1])
if prevRate < 100.0:
exclRate.append(100.0*float(thisRate - prevRate)/(100.0 - prevRate))
else:
exclRate.append(float(0.0))
# print str(myLine) + ' -> ',
# print str(blockid) + '\t' + str(fpCount) + '\t' + str(memCount),
# for i in range(0,len(exclRate),1):
# print '\t' + str(exclRate[i]),
# print ''
blockAccesses = []
blockMisses = []
blockAccesses.append(memCount)
# print "block L1 accesses: " + str(blockAccesses[0])
## blockAccesses[n] stores the number of memory accesses that make it to cache level n+1
for i in range(0,len(totalMisses)-1,1):
blockMisses.append((blockAccesses[i]*(float(100.0)-exclRate[i]))/float(100.0))
# print "block L" + str(i+1) + " misses: " + str(blockMisses[i])
blockAccesses.append(blockMisses[i])
# print "block L" + str(i+2) + " accesses: " + str(blockAccesses[i+1])
blockMisses.append(blockAccesses[cLevel-1]*((100.0-exclRate[cLevel-1])/100.0))
# print "block L" + str(cLevel) + " misses: " + str(blockMisses[cLevel-1])
for i in range(0,len(totalMisses),1):
totalMisses[i] += blockMisses[i]
totalAccesses[i] += blockAccesses[i]
# if printType == 1:
# print "taskid" + str(taskidname),
# if printType == 2:
# print sysidname.rjust(8),
print shortfileName,
totalHits = 0
for i in range(0,len(totalMisses),1):
levelHits = (totalAccesses[i] - totalMisses[i])
totalHits += levelHits
#print "Cache " + str(i+1) + " average %= " + str((levelHits)/(totalAccesses[i])*100) + " incl(" + str(100*totalHits/totalAccesses[0]) + ")"
print str(100*totalHits/totalAccesses[0]).ljust(13),
print ""
# used to sort list of files in natural or numeric order
def sort_nicely( l ):
""" Sort the given list in the way that humans expect.
"""
def convert(text):
if text.isdigit():
return int(text)
else:
return text
##convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
l.sort( key=alphanum_key )
#prints a usage error message and exits
def errorMsg():
print
print "Usage : ./AvgCacheCalc.py\n"
print "required:"
print "\t--app string; eg icepic,hycom,..."
print "\t--dataset string; eg large, standard..."
print "\t--cpu_count int; eg 256,1024,...\n"
print "One of these two are required:"
print "\t--taskid int; eg 0001"
print "\t--sysid int; 1, 2, or 3 chars - 75\n"
print "optional"
print "\t--dir string; eg /pmaclabs/ti10/ti10_icepic_standard_0128/processed_trace/ [default=.]"
exit()
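# Illustrative invocations (the directory and id values below are examples drawn
# from the usage text above, not required values):
# ./AvgCacheCalc.py --app icepic --dataset standard --cpu_count 128 --taskid 1 \
#     --dir /pmaclabs/ti10/ti10_icepic_standard_0128/processed_trace/
# ./AvgCacheCalc.py --app icepic --dataset standard --cpu_count 128 --sysid 75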
diridx = -1
sysidx = -1
taskidx = -1
sysidindexerr = 0
taskidindexerr = 0
try:
#check for sysid
sysidx = sys.argv.index("--sysid")
#print "past sysidx ValueError"
#print "sysidx=", sysidx
if sysidx != -1:
sysid = sys.argv[sysidx+1]
#print "sysid=", sysid
except IndexError:
print "TESTING: IndexError:No --sysid argument. ->pass"
sysidindexerr = 1
pass
except ValueError:
#print "TESTING: ValueError --sysid ->pass"
# sysid may not be needed, taskid maybe used
pass
try:
# check for taskid
taskidx = sys.argv.index("--taskid")
task = sys.argv[taskidx+1]
# pad task with 0s if needed
while len(task) < 4:
task = "0"+task
#print "TESTING:task=", task
#print "taskidx=", taskidx
#print "taskid=", task
except IndexError:
print "TESTING: IndexError: No --taskid argument. ->pass"
taskidindexerr = 1
pass
except ValueError:
#print "TESTING: ValueError --taskid ->pass"
pass
# if neither sysid or taskid is used, error
if sysidx == -1 and taskidx == -1:
print "Either --sysid or --taskid required - neither used"
errorMsg()
# if both sysid and taskid are used, error
if sysidx != -1 and taskidx != -1:
print "Either --sysid or --taskid required - both used"
errorMsg()
# check to make sure sys or task value was given
# needed because we skipped this check before
#print "Testing: sysidx and taskidx sysidx=", sysidx," taskidx=", taskidx
if sysidx != -1 and sysidindexerr == 1: # we are using sysid and there was a no value after
print "No --sysid value given, please provide argument\n"
errorMsg()
if taskidx != -1 and taskidindexerr == 1: # we are using taskid and there was a no value after
print "No --taskid value given, please provide argument\n"
errorMsg()
# check for dir
# not required, uses current dir
try:
diridx = sys.argv.index("--dir")
except ValueError:
#print"no dir->ok"
pass # if --dir is not used, current dir will be used
try:
if diridx == -1:
dirRead = os.getcwd() #use current dir if none given
#print "TESTING: current dir used dir=", dirRead
else:
dirRead = sys.argv[diridx+1]
#print "TESTING: input used ***WHAT ABOUT SLASH AT END*** dir=", dirRead
#pad a '/' to the end of the directory
if dirRead[-1] != '/':
dirRead = dirRead + '/'
except IndexError:
print "No --dir value given, please provide argument\n"
errorMsg()
except ValueError:
print "TESTING:Error with --dir argument, see usage below\n"
errorMsg()
try:
#check for app
appidx = sys.argv.index("--app")
appname = sys.argv[appidx+1]
#print "app=", appname
except IndexError:
print "No --app value given, please provide argument\n"
errorMsg()
except ValueError:
print "Error with --app argument, see usage below\n"
errorMsg()
try:
#check for dataset
datasetidx = sys.argv.index("--dataset")
datasetname = sys.argv[datasetidx+1]
#print "dataset=", datasetname
except IndexError:
print "No --dataset value given, please provide argument\n"
errorMsg()
except ValueError:
print "Error with --dataset argument, see usage below\n"
errorMsg()
try:
#check for cpu_count
cpuidx = sys.argv.index("--cpu_count")
cpu = sys.argv[cpuidx+1]
#print "cpu=", cpu
#print "cpu type:", type(cpu)
cpulen = len(cpu)
if cpulen > 4: # error if more than 4 digits
print "ERROR: cpu_count cannot be greater than 4 digits"
exit()
if cpulen < 4: # pad with 0 if less than 4 digits
#print "TESTING: cpulen:", cpulen, "needs to be 4"
while len(cpu) < 4:
cpu = "0"+cpu
cpulen = len(cpu)
except IndexError:
print "No --cpu_count value given, please provide argument\n"
errorMsg()
except ValueError:
print "Error with --cpu_count argument, see usage below\n"
errorMsg()
fileName = appname+"_"+datasetname+"_"+cpu
# print "filename:", fileName
# print "dirRead:", dirRead
print
print "Reading files from: ", dirRead
#gets the list of files with a matching sysid value
if taskidx == -1: #use sysid
print "Averaging for all files like "+fileName+"*.sysid"+sysid
fileList = glob.glob(dirRead+fileName+"*.sysid"+sysid)
#or gets the list of files with the taskid value
elif sysidx == -1: #use taskid
print "Averaging for all files like "+fileName+"_"+task+"*"
#print dirRead+fileName+"_"+task+".*"
fileList = glob.glob(dirRead+fileName+"_"+task+".*")
else:
print "ERROR: No one should get here either taskid or sysid should have been validated"
errorMsg()
#for i in range(len(fileList)):
# print "TESTING:filelist[", i, "]=",fileList[i]
#sort the list of files
#fileList.sort()
#sort numerically
sort_nicely(fileList)
#print "fileList[0]:", fileList[0]
#print "fileList type:", type(fileList)
# Catch if there are no files matching the request
if len(fileList) == 0:
print "ERROR: No files match input...exiting"
exit()
print "Number of files: ", len(fileList)
#print "This may take a moment"
if taskidx == -1: #use sysid
dirAvg = []
# goes through each file and collects all the averages
# inclusive and exclusive for Caches 1-3 (if 2 and 3 are present)
for i in range(0, len(fileList)):
dirAvg.append(getAvgSingle(fileList[i]))
printAvgSingle(fileList[i],1)
print "\n *** Averaged Hit rates ***"
print fileName+"*.sysid"+sysid,
numCache = len(dirAvg[0])
totalCache = [0,0,0,0,0,0]
#print "TESTING:numcache for avg of files is:", numCache
#print "TESTING:dirAvg[0]=", dirAvg[0]
#print "TESTING:len(dirAvg[0])=", len(dirAvg[0])
#print "TESTING:len(dirAvg)=", len(dirAvg)
#print "TESTING:numCache range= ", range(numCache)
#calculate averages for the folder
for i in range(len(dirAvg)):
#if len(dirAvg[i]) > 4:
#print "TESTING:dirAvg[",i,"][4]=", dirAvg[i][4]
for j in range(numCache):
#print "::j=",j,"dirAvg[",i,"][",j,"]= ", dirAvg[i][j]
totalCache[j] = totalCache[j]+dirAvg[i][j]
#print values of the cache
for i in range(0, len(totalCache), 2):
if totalCache[i+1] !=0:
print totalCache[i+1]/len(dirAvg),
#print "excl", totalCache[i]/len(dirAvg)
#print "Cache " + str((i/2)+1) + " average %= " + str(totalCache[i]/len(dirAvg)) + " incl(" + str(totalCache[i+1]/len(dirAvg)) + ")"
elif sysidx == -1: #use taskid
for i in range(0, len(fileList)):
printAvgSingle(fileList[i],2)
| DoNotUseThisCodeJUSTFORKS/PEBIL | scripts/AvgCacheCalc.py | Python | gpl-3.0 | 18,915 |
"""Solar analemma."""
from ._skyBase import RadianceSky
from ..material.light import Light
from ..geometry.source import Source
from ladybug.epw import EPW
from ladybug.sunpath import Sunpath
import os
try:
from itertools import izip as zip
writemode = 'wb'
except ImportError:
# python 3
writemode = 'w'
class Analemma(RadianceSky):
"""Generate a radiance-based analemma.
Use Analemma for solar access/sunlight hours studies. For annual daylight/radiation
studies see AnalemmaReversed.
Analemma consists of two files:
1. *.ann file which includes sun geometries and materials.
2. *.mod file includes list of modifiers that are included in *.ann file.
"""
def __init__(self, sun_vectors, sun_up_hours):
"""Radiance-based analemma.
Args:
sun_vectors: A list of sun vectors as (x, y, z).
sun_up_hours: List of hours of the year that corresponds to sun_vectors.
"""
RadianceSky.__init__(self)
vectors = sun_vectors or []
# store sun vectors as immutable tuples
self._sun_vectors = tuple(tuple(v) for v in vectors)
self._sun_up_hours = sun_up_hours
assert len(sun_up_hours) == len(vectors), \
ValueError(
'Length of vectors [%d] does not match the length of hours [%d]' %
(len(vectors), len(sun_up_hours))
)
@classmethod
def from_json(cls, inp):
"""Create an analemma from a dictionary."""
return cls(inp['sun_vectors'], inp['sun_up_hours'])
@classmethod
def from_location(cls, location, hoys=None, north=0, is_leap_year=False):
"""Generate a radiance-based analemma for a location.
Args:
location: A ladybug location.
hoys: A list of hours of the year (default: range(8760)).
north: North angle from Y direction (default: 0).
is_leap_year: A boolean to indicate if hours are for a leap year
(default: False).
"""
sun_vectors = []
sun_up_hours = []
hoys = hoys or range(8760)
north = north or 0
sp = Sunpath.from_location(location, north)
sp.is_leap_year = is_leap_year
for hour in hoys:
sun = sp.calculate_sun_from_hoy(hour)
if sun.altitude < 0:
continue
sun_vectors.append(sun.sun_vector)
sun_up_hours.append(hour)
return cls(sun_vectors, sun_up_hours)
@classmethod
def from_location_sun_up_hours(cls, location, sun_up_hours, north=0,
is_leap_year=False):
"""Generate a radiance-based analemma for a location.
Args:
location: A ladybug location.
sun_up_hours: A list of hours of the year to be included in analemma.
north: North angle from Y direction (default: 0).
is_leap_year: A boolean to indicate if hours are for a leap year
(default: False).
"""
sun_vectors = []
north = north or 0
sp = Sunpath.from_location(location, north)
sp.is_leap_year = is_leap_year
for hour in sun_up_hours:
sun = sp.calculate_sun_from_hoy(hour)
sun_vectors.append(sun.sun_vector)
return cls(sun_vectors, sun_up_hours)
@classmethod
def from_wea(cls, wea, hoys=None, north=0, is_leap_year=False):
"""Generate a radiance-based analemma from a ladybug wea.
NOTE: Only the location from wea will be used for creating analemma. For
climate-based sun matrix see SunMatrix class.
Args:
wea: A ladybug Wea.
hoys: A list of hours of the year (default: range(8760)).
north: North angle from Y direction (default: 0).
is_leap_year: A boolean to indicate if hours are for a leap year
(default: False).
"""
return cls.from_location(wea.location, hoys, north, is_leap_year)
@classmethod
def from_wea_sun_up_hours(cls, wea, sun_up_hours, north=0, is_leap_year=False):
"""Generate a radiance-based analemma from a ladybug wea.
NOTE: Only the location from wea will be used for creating analemma. For
climate-based sun matrix see SunMatrix class.
Args:
wea: A ladybug Wea.
sun_up_hours: A list of hours of the year to be included in analemma.
north: North angle from Y direction (default: 0).
is_leap_year: A boolean to indicate if hours are for a leap year
(default: False).
"""
return cls.from_location_sun_up_hours(wea.location, sun_up_hours, north,
is_leap_year)
@classmethod
def from_epw_file(cls, epw_file, hoys=None, north=0, is_leap_year=False):
"""Create sun matrix from an epw file.
NOTE: Only the location from epw file will be used for creating analemma. For
climate-based sun matrix see SunMatrix class.
Args:
epw_file: Full path to an epw file.
hoys: A list of hours of the year (default: range(8760)).
north: North angle from Y direction (default: 0).
is_leap_year: A boolean to indicate if hours are for a leap year
(default: False).
"""
return cls.from_location(EPW(epw_file).location, hoys, north, is_leap_year)
@classmethod
def from_epw_file_sun_up_hours(cls, epw_file, sun_up_hours, north=0,
is_leap_year=False):
"""Create sun matrix from an epw file.
NOTE: Only the location from epw file will be used for creating analemma. For
climate-based sun matrix see SunMatrix class.
Args:
epw_file: Full path to an epw file.
sun_up_hours: A list of hours of the year to be included in analemma.
north: North angle from Y direction (default: 0).
is_leap_year: A boolean to indicate if hours are for a leap year
(default: False).
"""
return cls.from_location_sun_up_hours(EPW(epw_file).location, sun_up_hours,
north, is_leap_year)
@property
def isAnalemma(self):
"""Return True."""
return True
@property
def is_climate_based(self):
"""Return True if generated based on values from weather file."""
return False
@property
def analemma_file(self):
"""Analemma file name.
Use this file to create the octree.
"""
return 'analemma.rad'
@property
def sunlist_file(self):
"""Sun list file name.
Use this file as the list of modifiers in rcontrib.
"""
return 'analemma.mod'
@property
def sun_vectors(self):
"""Return list of sun vectors."""
return self._sun_vectors
@property
def sun_up_hours(self):
"""Return list of hours for sun vectors."""
return self._sun_up_hours
def execute(self, working_dir):
fp = os.path.join(working_dir, self.analemma_file) # analemma file (geo and mat)
sfp = os.path.join(working_dir, self.sunlist_file) # modifier list
with open(fp, writemode) as outf, open(sfp, writemode) as outm:
for hoy, vector in zip(self.sun_up_hours, self.sun_vectors):
# use minute of the year to name sun positions
moy = int(round(hoy * 60))
mat = Light('sol_%06d' % moy, 1e6, 1e6, 1e6)
sun = Source('sun_%06d' % moy, vector, 0.533, mat)
outf.write(sun.to_rad_string(True).replace('\n', ' ') + '\n')
outm.write('sol_%06d\n' % moy)
def duplicate(self):
"""Duplicate this class."""
return Analemma(self.sun_vectors, self.sun_up_hours)
def to_rad_string(self):
"""Get the radiance command line as a string."""
raise AttributeError(
'analemma does not have a single line command. Try execute method.'
)
def to_json(self):
"""Convert analemma to a dictionary."""
return {'sun_vectors': self.sun_vectors, 'sun_up_hours': self.sun_up_hours}
def ToString(self):
"""Overwrite .NET ToString method."""
return self.__repr__()
def __repr__(self):
"""Analemma representation."""
return 'Analemma: #%d' % len(self.sun_vectors)
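# A minimal usage sketch (the epw path and output folder are hypothetical):
# build an analemma for a location's sun-up hours and write analemma.rad /
# analemma.mod for use when creating the octree and the rcontrib modifier list.
#
# analemma = Analemma.from_epw_file('./USA_CA_San.Francisco.epw')
# analemma.execute('./radiance_files')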
class AnalemmaReversed(Analemma):
"""Generate a radiance-based analemma.
Reversed Analemma reverses direction of input sun vectors. Use reversed Analemma for
radiation and daylight studies.
Analemma consists of two files:
1. *_reversed.ann file which includes sun geometries and materials.
2. *.mod file includes list of modifiers that are included in
*_reversed.ann file.
"""
@property
def analemma_file(self):
"""Analemma file name.
Use this file to create the octree.
"""
return 'analemma_reversed.rad'
def execute(self, working_dir):
fp = os.path.join(working_dir, self.analemma_file) # analemma file (geo and mat)
sfp = os.path.join(working_dir, self.sunlist_file) # modifier list
with open(fp, writemode) as outf, open(sfp, writemode) as outm:
for hoy, vector in zip(self.sun_up_hours, self.sun_vectors):
# use minute of the year to name sun positions
moy = int(round(hoy * 60))
# reverse sun vector
r_vector = tuple(-1 * i for i in vector)
mat = Light('sol_%06d' % moy, 1e6, 1e6, 1e6)
sun = Source('sun_%06d' % moy, r_vector, 0.533, mat)
outf.write(sun.to_rad_string(True).replace('\n', ' ') + '\n')
outm.write('sol_%06d\n' % moy)
| ladybug-analysis-tools/honeybee | honeybee_plus/radiance/sky/analemma.py | Python | gpl-3.0 | 9,941 |
#!/usr/bin/env python
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Read samples from a UHD device and write to file formatted as binary
outputs single precision complex float values or complex short values
(interleaved 16 bit signed short integers).
"""
from gnuradio import gr, eng_notation
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
n2s = eng_notation.num_to_str
class rx_cfile_block(gr.top_block):
def __init__(self, options, filename):
gr.top_block.__init__(self)
# Create a UHD device source
if options.output_shorts:
self._u = uhd.usrp_source(device_addr=options.address,
io_type=uhd.io_type.COMPLEX_INT16,
num_channels=1)
self._sink = gr.file_sink(gr.sizeof_short*2, filename)
else:
self._u = uhd.usrp_source(device_addr=options.address,
io_type=uhd.io_type.COMPLEX_FLOAT32,
num_channels=1)
self._sink = gr.file_sink(gr.sizeof_gr_complex, filename)
# Set receiver sample rate
self._u.set_samp_rate(options.samp_rate)
# Set receive daughterboard gain
if options.gain is None:
g = self._u.get_gain_range()
options.gain = float(g.start()+g.stop())/2
print "Using mid-point gain of", options.gain, "(", g.start(), "-", g.stop(), ")"
self._u.set_gain(options.gain)
# Set the antenna
if(options.antenna):
self._u.set_antenna(options.antenna, 0)
# Set frequency (tune request takes lo_offset)
if(options.lo_offset is not None):
treq = uhd.tune_request(options.freq, options.lo_offset)
else:
treq = uhd.tune_request(options.freq)
tr = self._u.set_center_freq(treq)
if tr == None:
sys.stderr.write('Failed to set center frequency\n')
raise SystemExit, 1
# Create head block if needed and wire it up
if options.nsamples is None:
self.connect(self._u, self._sink)
else:
if options.output_shorts:
self._head = gr.head(gr.sizeof_short*2, int(options.nsamples))
else:
self._head = gr.head(gr.sizeof_gr_complex, int(options.nsamples))
self.connect(self._u, self._head, self._sink)
input_rate = self._u.get_samp_rate()
if options.verbose:
print "Address:", options.address
print "Rx gain:", options.gain
print "Rx baseband frequency:", n2s(tr.actual_rf_freq)
print "Rx DDC frequency:", n2s(tr.actual_dsp_freq)
print "Rx Sample Rate:", n2s(input_rate)
if options.nsamples is None:
print "Receiving samples until Ctrl-C"
else:
print "Receving", n2s(options.nsamples), "samples"
if options.output_shorts:
print "Writing 16-bit complex shorts"
else:
print "Writing 32-bit complex floats"
print "Output filename:", filename
def get_options():
usage="%prog: [options] output_filename"
parser = OptionParser(option_class=eng_option, usage=usage)
parser.add_option("-a", "--address", type="string", default="addr=192.168.10.2",
help="Address of UHD device, [default=%default]")
parser.add_option("-A", "--antenna", type="string", default=None,
help="select Rx Antenna where appropriate")
parser.add_option("", "--samp-rate", type="eng_float", default=1e6,
help="set sample rate (bandwidth) [default=%default]")
parser.add_option("-f", "--freq", type="eng_float", default=None,
help="set frequency to FREQ", metavar="FREQ")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option( "-s","--output-shorts", action="store_true", default=False,
help="output interleaved shorts instead of complex floats")
parser.add_option("-N", "--nsamples", type="eng_float", default=None,
help="number of samples to collect [default=+inf]")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output")
parser.add_option("", "--lo-offset", type="eng_float", default=None,
help="set daughterboard LO offset to OFFSET [default=hw default]")
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
if options.freq is None:
parser.print_help()
sys.stderr.write('You must specify the frequency with -f FREQ\n');
raise SystemExit, 1
return (options, args[0])
if __name__ == '__main__':
(options, filename) = get_options()
tb = rx_cfile_block(options, filename)
try:
tb.run()
except KeyboardInterrupt:
pass
| tta/gnuradio-tta | gr-uhd/apps/uhd_rx_cfile.py | Python | gpl-3.0 | 5,930 |
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QColor
from volumina.api import LazyflowSource, ColortableLayer, AlphaModulatedLayer
from ilastik.applets.dataExport.dataExportGui import DataExportGui, DataExportLayerViewerGui
from lazyflow.operators import OpMultiArraySlicer2
from ilastik.utility.exportingOperator import ExportingGui
class ObjectClassificationDataExportGui( DataExportGui, ExportingGui ):
"""
A subclass of the generic data export gui that creates custom layer viewers.
"""
def __init__(self, *args, **kwargs):
super(ObjectClassificationDataExportGui, self).__init__(*args, **kwargs)
self._exporting_operator = None
def set_exporting_operator(self, op):
self._exporting_operator = op
def get_exporting_operator(self, lane=0):
return self._exporting_operator.getLane(lane)
def createLayerViewer(self, opLane):
return ObjectClassificationResultsViewer(self.parentApplet, opLane)
def get_export_dialog_title(self):
return "Export Object Information"
@property
def gui_applet(self):
return self.parentApplet
def get_raw_shape(self):
return self.get_exporting_operator().RawImages.meta.shape
def get_feature_names(self):
return self.get_exporting_operator().ComputedFeatureNames([]).wait()
def _initAppletDrawerUic(self):
super(ObjectClassificationDataExportGui, self)._initAppletDrawerUic()
from PyQt4.QtGui import QGroupBox, QPushButton, QVBoxLayout
group = QGroupBox("Export Object Feature Table", self.drawer)
group.setLayout(QVBoxLayout())
self.drawer.layout().addWidget(group)
btn = QPushButton("Configure and export", group)
btn.clicked.connect(self.show_export_dialog)
group.layout().addWidget(btn)
def _createDefault16ColorColorTable():
colors = []
# Transparent for the zero label
colors.append(QColor(0,0,0,0))
# ilastik v0.5 colors
colors.append( QColor( Qt.red ) )
colors.append( QColor( Qt.green ) )
colors.append( QColor( Qt.yellow ) )
colors.append( QColor( Qt.blue ) )
colors.append( QColor( Qt.magenta ) )
colors.append( QColor( Qt.darkYellow ) )
colors.append( QColor( Qt.lightGray ) )
# Additional colors
colors.append( QColor(255, 105, 180) ) #hot pink
colors.append( QColor(102, 205, 170) ) #dark aquamarine
colors.append( QColor(165, 42, 42) ) #brown
colors.append( QColor(0, 0, 128) ) #navy
colors.append( QColor(255, 165, 0) ) #orange
colors.append( QColor(173, 255, 47) ) #green-yellow
colors.append( QColor(128,0, 128) ) #purple
colors.append( QColor(240, 230, 140) ) #khaki
# colors.append( QColor(192, 192, 192) ) #silver
# colors.append( QColor(69, 69, 69) ) # dark grey
# colors.append( QColor( Qt.cyan ) )
assert len(colors) == 16
return [c.rgba() for c in colors]
class ObjectClassificationResultsViewer(DataExportLayerViewerGui):
_colorTable16 = _createDefault16ColorColorTable()
def setupLayers(self):
layers = []
opLane = self.topLevelOperatorView
selection_names = opLane.SelectionNames.value
selection = selection_names[ opLane.InputSelection.value ]
# This code depends on a specific order for the export slots.
# If those change, update this function!
assert selection in ['Object Predictions', 'Object Probabilities', 'Pixel Probabilities']
if selection == "Object Predictions":
fromDiskSlot = self.topLevelOperatorView.ImageOnDisk
if fromDiskSlot.ready():
exportLayer = ColortableLayer( LazyflowSource(fromDiskSlot), colorTable=self._colorTable16 )
exportLayer.name = "Prediction - Exported"
exportLayer.visible = True
layers.append(exportLayer)
previewSlot = self.topLevelOperatorView.ImageToExport
if previewSlot.ready():
previewLayer = ColortableLayer( LazyflowSource(previewSlot), colorTable=self._colorTable16 )
previewLayer.name = "Prediction - Preview"
previewLayer.visible = False
layers.append(previewLayer)
elif selection == "Object Probabilities":
exportedLayers = self._initPredictionLayers(opLane.ImageOnDisk)
for layer in exportedLayers:
layer.visible = True
layer.name = layer.name + "- Exported"
layers += exportedLayers
previewLayers = self._initPredictionLayers(opLane.ImageToExport)
for layer in previewLayers:
layer.visible = False
layer.name = layer.name + "- Preview"
layers += previewLayers
elif selection == 'Pixel Probabilities':
exportedLayers = self._initPredictionLayers(opLane.ImageOnDisk)
for layer in exportedLayers:
layer.visible = True
layer.name = layer.name + "- Exported"
layers += exportedLayers
previewLayers = self._initPredictionLayers(opLane.ImageToExport)
for layer in previewLayers:
layer.visible = False
layer.name = layer.name + "- Preview"
layers += previewLayers
else:
assert False, "Unknown selection."
rawSlot = self.topLevelOperatorView.RawData
if rawSlot.ready():
rawLayer = self.createStandardLayerFromSlot(rawSlot)
rawLayer.name = "Raw Data"
rawLayer.opacity = 1.0
layers.append(rawLayer)
return layers
def _initPredictionLayers(self, predictionSlot):
layers = []
opLane = self.topLevelOperatorView
# Use a slicer to provide a separate slot for each channel layer
opSlicer = OpMultiArraySlicer2( parent=opLane.viewed_operator().parent )
opSlicer.Input.connect( predictionSlot )
opSlicer.AxisFlag.setValue('c')
for channel, channelSlot in enumerate(opSlicer.Slices):
if channelSlot.ready():
drange = channelSlot.meta.drange or (0.0, 1.0)
predictsrc = LazyflowSource(channelSlot)
predictLayer = AlphaModulatedLayer( predictsrc,
tintColor=QColor.fromRgba(self._colorTable16[channel+1]),
# FIXME: This is weird. Why are range and normalize both set to the same thing?
range=drange,
normalize=drange )
predictLayer.opacity = 1.0
predictLayer.visible = True
predictLayer.name = "Probability Channel #{}".format( channel+1 )
layers.append(predictLayer)
return layers | nielsbuwen/ilastik | ilastik/applets/objectClassification/objectClassificationDataExportGui.py | Python | gpl-3.0 | 7,930 |
# -*- coding: utf-8 -*-
"""
sphinx.environment.managers.toctree
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Toctree manager for sphinx.environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from six import iteritems
from docutils import nodes
from sphinx import addnodes
from sphinx.util import url_re
from sphinx.util.nodes import clean_astext, process_only_nodes
from sphinx.transforms import SphinxContentsFilter
from sphinx.environment.managers import EnvironmentManager
class Toctree(EnvironmentManager):
name = 'toctree'
def __init__(self, env):
super(Toctree, self).__init__(env)
self.tocs = env.tocs
self.toc_num_entries = env.toc_num_entries
self.toc_secnumbers = env.toc_secnumbers
self.toc_fignumbers = env.toc_fignumbers
self.toctree_includes = env.toctree_includes
self.files_to_rebuild = env.files_to_rebuild
self.glob_toctrees = env.glob_toctrees
self.numbered_toctrees = env.numbered_toctrees
def clear_doc(self, docname):
self.tocs.pop(docname, None)
self.toc_secnumbers.pop(docname, None)
self.toc_fignumbers.pop(docname, None)
self.toc_num_entries.pop(docname, None)
self.toctree_includes.pop(docname, None)
self.glob_toctrees.discard(docname)
self.numbered_toctrees.discard(docname)
for subfn, fnset in list(self.files_to_rebuild.items()):
fnset.discard(docname)
if not fnset:
del self.files_to_rebuild[subfn]
def merge_other(self, docnames, other):
for docname in docnames:
self.tocs[docname] = other.tocs[docname]
self.toc_num_entries[docname] = other.toc_num_entries[docname]
if docname in other.toctree_includes:
self.toctree_includes[docname] = other.toctree_includes[docname]
if docname in other.glob_toctrees:
self.glob_toctrees.add(docname)
if docname in other.numbered_toctrees:
self.numbered_toctrees.add(docname)
for subfn, fnset in other.files_to_rebuild.items():
self.files_to_rebuild.setdefault(subfn, set()).update(fnset & docnames)
def process_doc(self, docname, doctree):
"""Build a TOC from the doctree and store it in the inventory."""
numentries = [0] # nonlocal again...
def traverse_in_section(node, cls):
"""Like traverse(), but stay within the same section."""
result = []
if isinstance(node, cls):
result.append(node)
for child in node.children:
if isinstance(child, nodes.section):
continue
result.extend(traverse_in_section(child, cls))
return result
def build_toc(node, depth=1):
entries = []
for sectionnode in node:
# find all toctree nodes in this section and add them
# to the toc (just copying the toctree node which is then
# resolved in self.get_and_resolve_doctree)
if isinstance(sectionnode, addnodes.only):
onlynode = addnodes.only(expr=sectionnode['expr'])
blist = build_toc(sectionnode, depth)
if blist:
onlynode += blist.children
entries.append(onlynode)
continue
if not isinstance(sectionnode, nodes.section):
for toctreenode in traverse_in_section(sectionnode,
addnodes.toctree):
item = toctreenode.copy()
entries.append(item)
# important: do the inventory stuff
self.note_toctree(docname, toctreenode)
continue
title = sectionnode[0]
# copy the contents of the section title, but without references
# and unnecessary stuff
visitor = SphinxContentsFilter(doctree)
title.walkabout(visitor)
nodetext = visitor.get_entry_text()
if not numentries[0]:
# for the very first toc entry, don't add an anchor
# as it is the file's title anyway
anchorname = ''
else:
anchorname = '#' + sectionnode['ids'][0]
numentries[0] += 1
# make these nodes:
# list_item -> compact_paragraph -> reference
reference = nodes.reference(
'', '', internal=True, refuri=docname,
anchorname=anchorname, *nodetext)
para = addnodes.compact_paragraph('', '', reference)
item = nodes.list_item('', para)
sub_item = build_toc(sectionnode, depth + 1)
item += sub_item
entries.append(item)
if entries:
return nodes.bullet_list('', *entries)
return []
toc = build_toc(doctree)
if toc:
self.tocs[docname] = toc
else:
self.tocs[docname] = nodes.bullet_list('')
self.toc_num_entries[docname] = numentries[0]
def note_toctree(self, docname, toctreenode):
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
if toctreenode['glob']:
self.glob_toctrees.add(docname)
if toctreenode.get('numbered'):
self.numbered_toctrees.add(docname)
includefiles = toctreenode['includefiles']
for includefile in includefiles:
# note that if the included file is rebuilt, this one must be
# too (since the TOC of the included file could have changed)
self.files_to_rebuild.setdefault(includefile, set()).add(docname)
self.toctree_includes.setdefault(docname, []).extend(includefiles)
def get_toc_for(self, docname, builder):
"""Return a TOC nodetree -- for use on the same page only!"""
tocdepth = self.env.metadata[docname].get('tocdepth', 0)
try:
toc = self.tocs[docname].deepcopy()
self._toctree_prune(toc, 2, tocdepth)
except KeyError:
# the document does not exist anymore: return a dummy node that
# renders to nothing
return nodes.paragraph()
process_only_nodes(toc, builder.tags, warn_node=self.env.warn_node)
for node in toc.traverse(nodes.reference):
node['refuri'] = node['anchorname'] or '#'
return toc
def get_toctree_for(self, docname, builder, collapse, **kwds):
"""Return the global TOC nodetree."""
doctree = self.env.get_doctree(self.env.config.master_doc)
toctrees = []
if 'includehidden' not in kwds:
kwds['includehidden'] = True
if 'maxdepth' not in kwds:
kwds['maxdepth'] = 0
kwds['collapse'] = collapse
for toctreenode in doctree.traverse(addnodes.toctree):
toctree = self.env.resolve_toctree(docname, builder, toctreenode,
prune=True, **kwds)
if toctree:
toctrees.append(toctree)
if not toctrees:
return None
result = toctrees[0]
for toctree in toctrees[1:]:
result.extend(toctree.children)
return result
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
to the value of the *maxdepth* option on the *toctree* node.
If *titles_only* is True, only toplevel document titles will be in the
resulting tree.
If *collapse* is True, all branches not containing docname will
be collapsed.
"""
if toctree.get('hidden', False) and not includehidden:
return None
# For reading the following two helper function, it is useful to keep
# in mind the node structure of a toctree (using HTML-like node names
# for brevity):
#
# <ul>
# <li>
# <p><a></p>
# <p><a></p>
# ...
# <ul>
# ...
# </ul>
# </li>
# </ul>
#
# The transformation is made in two passes in order to avoid
# interactions between marking and pruning the tree (see bug #1046).
toctree_ancestors = self.get_toctree_ancestors(docname)
def _toctree_add_classes(node, depth):
"""Add 'toctree-l%d' and 'current' classes to the toctree."""
for subnode in node.children:
if isinstance(subnode, (addnodes.compact_paragraph,
nodes.list_item)):
# for <p> and <li>, indicate the depth level and recurse
subnode['classes'].append('toctree-l%d' % (depth-1))
_toctree_add_classes(subnode, depth)
elif isinstance(subnode, nodes.bullet_list):
# for <ul>, just recurse
_toctree_add_classes(subnode, depth+1)
elif isinstance(subnode, nodes.reference):
# for <a>, identify which entries point to the current
# document and therefore may not be collapsed
if subnode['refuri'] == docname:
if not subnode['anchorname']:
# give the whole branch a 'current' class
# (useful for styling it differently)
branchnode = subnode
while branchnode:
branchnode['classes'].append('current')
branchnode = branchnode.parent
# mark the list_item as "on current page"
if subnode.parent.parent.get('iscurrent'):
# but only if it's not already done
return
while subnode:
subnode['iscurrent'] = True
subnode = subnode.parent
def _entries_from_toctree(toctreenode, parents,
separate=False, subtree=False):
"""Return TOC entries for a toctree node."""
refs = [(e[0], e[1]) for e in toctreenode['entries']]
entries = []
for (title, ref) in refs:
try:
refdoc = None
if url_re.match(ref):
if title is None:
title = ref
reference = nodes.reference('', '', internal=False,
refuri=ref, anchorname='',
*[nodes.Text(title)])
para = addnodes.compact_paragraph('', '', reference)
item = nodes.list_item('', para)
toc = nodes.bullet_list('', item)
elif ref == 'self':
# 'self' refers to the document from which this
# toctree originates
ref = toctreenode['parent']
if not title:
title = clean_astext(self.env.titles[ref])
reference = nodes.reference('', '', internal=True,
refuri=ref,
anchorname='',
*[nodes.Text(title)])
para = addnodes.compact_paragraph('', '', reference)
item = nodes.list_item('', para)
# don't show subitems
toc = nodes.bullet_list('', item)
else:
if ref in parents:
self.env.warn(ref, 'circular toctree references '
'detected, ignoring: %s <- %s' %
(ref, ' <- '.join(parents)))
continue
refdoc = ref
toc = self.tocs[ref].deepcopy()
maxdepth = self.env.metadata[ref].get('tocdepth', 0)
if ref not in toctree_ancestors or (prune and maxdepth > 0):
self._toctree_prune(toc, 2, maxdepth, collapse)
process_only_nodes(toc, builder.tags, warn_node=self.env.warn_node)
if title and toc.children and len(toc.children) == 1:
child = toc.children[0]
for refnode in child.traverse(nodes.reference):
if refnode['refuri'] == ref and \
not refnode['anchorname']:
refnode.children = [nodes.Text(title)]
if not toc.children:
# empty toc means: no titles will show up in the toctree
self.env.warn_node(
'toctree contains reference to document %r that '
'doesn\'t have a title: no link will be generated'
% ref, toctreenode)
except KeyError:
# this is raised if the included file does not exist
self.env.warn_node(
'toctree contains reference to nonexisting document %r'
% ref, toctreenode)
else:
# if titles_only is given, only keep the main title and
# sub-toctrees
if titles_only:
# delete everything but the toplevel title(s)
# and toctrees
for toplevel in toc:
# nodes with length 1 don't have any children anyway
if len(toplevel) > 1:
subtrees = toplevel.traverse(addnodes.toctree)
if subtrees:
toplevel[1][:] = subtrees
else:
toplevel.pop(1)
# resolve all sub-toctrees
for subtocnode in toc.traverse(addnodes.toctree):
if not (subtocnode.get('hidden', False) and
not includehidden):
i = subtocnode.parent.index(subtocnode) + 1
for item in _entries_from_toctree(
subtocnode, [refdoc] + parents,
subtree=True):
subtocnode.parent.insert(i, item)
i += 1
subtocnode.parent.remove(subtocnode)
if separate:
entries.append(toc)
else:
entries.extend(toc.children)
if not subtree and not separate:
ret = nodes.bullet_list()
ret += entries
return [ret]
return entries
maxdepth = maxdepth or toctree.get('maxdepth', -1)
if not titles_only and toctree.get('titlesonly', False):
titles_only = True
if not includehidden and toctree.get('includehidden', False):
includehidden = True
# NOTE: previously, this was separate=True, but that leads to artificial
# separation when two or more toctree entries form a logical unit, so
# separating mode is no longer used -- it's kept here for history's sake
tocentries = _entries_from_toctree(toctree, [], separate=False)
if not tocentries:
return None
newnode = addnodes.compact_paragraph('', '')
caption = toctree.attributes.get('caption')
if caption:
caption_node = nodes.caption(caption, '', *[nodes.Text(caption)])
caption_node.line = toctree.line
caption_node.source = toctree.source
caption_node.rawsource = toctree['rawcaption']
if hasattr(toctree, 'uid'):
# move uid to caption_node to translate it
caption_node.uid = toctree.uid
del toctree.uid
newnode += caption_node
newnode.extend(tocentries)
newnode['toctree'] = True
# prune the tree to maxdepth, also set toc depth and current classes
_toctree_add_classes(newnode, 1)
self._toctree_prune(newnode, 1, prune and maxdepth or 0, collapse)
if len(newnode[-1]) == 0: # No titles found
return None
# set the target paths in the toctrees (they are not known at TOC
# generation time)
for refnode in newnode.traverse(nodes.reference):
if not url_re.match(refnode['refuri']):
refnode['refuri'] = builder.get_relative_uri(
docname, refnode['refuri']) + refnode['anchorname']
return newnode
def get_toctree_ancestors(self, docname):
parent = {}
for p, children in iteritems(self.toctree_includes):
for child in children:
parent[child] = p
ancestors = []
d = docname
while d in parent and d not in ancestors:
ancestors.append(d)
d = parent[d]
return ancestors
def _toctree_prune(self, node, depth, maxdepth, collapse=False):
"""Utility: Cut a TOC at a specified depth."""
for subnode in node.children[:]:
if isinstance(subnode, (addnodes.compact_paragraph,
nodes.list_item)):
# for <p> and <li>, just recurse
self._toctree_prune(subnode, depth, maxdepth, collapse)
elif isinstance(subnode, nodes.bullet_list):
# for <ul>, determine if the depth is too large or if the
# entry is to be collapsed
if maxdepth > 0 and depth > maxdepth:
subnode.parent.replace(subnode, [])
else:
# cull sub-entries whose parents aren't 'current'
if (collapse and depth > 1 and
'iscurrent' not in subnode.parent):
subnode.parent.remove(subnode)
else:
# recurse on visible children
self._toctree_prune(subnode, depth+1, maxdepth, collapse)
def assign_section_numbers(self):
"""Assign a section number to each heading under a numbered toctree."""
# a list of all docnames whose section numbers changed
rewrite_needed = []
assigned = set()
old_secnumbers = self.toc_secnumbers
self.toc_secnumbers = self.env.toc_secnumbers = {}
def _walk_toc(node, secnums, depth, titlenode=None):
# titlenode is the title of the document, it will get assigned a
# secnumber too, so that it shows up in next/prev/parent rellinks
for subnode in node.children:
if isinstance(subnode, nodes.bullet_list):
numstack.append(0)
_walk_toc(subnode, secnums, depth-1, titlenode)
numstack.pop()
titlenode = None
elif isinstance(subnode, nodes.list_item):
_walk_toc(subnode, secnums, depth, titlenode)
titlenode = None
elif isinstance(subnode, addnodes.only):
# at this stage we don't know yet which sections are going
# to be included; just include all of them, even if it leads
# to gaps in the numbering
_walk_toc(subnode, secnums, depth, titlenode)
titlenode = None
elif isinstance(subnode, addnodes.compact_paragraph):
numstack[-1] += 1
if depth > 0:
number = tuple(numstack)
else:
number = None
secnums[subnode[0]['anchorname']] = \
subnode[0]['secnumber'] = number
if titlenode:
titlenode['secnumber'] = number
titlenode = None
elif isinstance(subnode, addnodes.toctree):
_walk_toctree(subnode, depth)
def _walk_toctree(toctreenode, depth):
if depth == 0:
return
for (title, ref) in toctreenode['entries']:
if url_re.match(ref) or ref == 'self' or ref in assigned:
# don't mess with those
continue
if ref in self.tocs:
secnums = self.toc_secnumbers[ref] = {}
assigned.add(ref)
_walk_toc(self.tocs[ref], secnums, depth,
self.env.titles.get(ref))
if secnums != old_secnumbers.get(ref):
rewrite_needed.append(ref)
for docname in self.numbered_toctrees:
assigned.add(docname)
doctree = self.env.get_doctree(docname)
for toctreenode in doctree.traverse(addnodes.toctree):
depth = toctreenode.get('numbered', 0)
if depth:
# every numbered toctree gets new numbering
numstack = [0]
_walk_toctree(toctreenode, depth)
return rewrite_needed
def assign_figure_numbers(self):
"""Assign a figure number to each figure under a numbered toctree."""
rewrite_needed = []
assigned = set()
old_fignumbers = self.toc_fignumbers
self.toc_fignumbers = self.env.toc_fignumbers = {}
fignum_counter = {}
def get_section_number(docname, section):
anchorname = '#' + section['ids'][0]
secnumbers = self.toc_secnumbers.get(docname, {})
if anchorname in secnumbers:
secnum = secnumbers.get(anchorname)
else:
secnum = secnumbers.get('')
return secnum or tuple()
def get_next_fignumber(figtype, secnum):
counter = fignum_counter.setdefault(figtype, {})
secnum = secnum[:self.env.config.numfig_secnum_depth]
counter[secnum] = counter.get(secnum, 0) + 1
return secnum + (counter[secnum],)
def register_fignumber(docname, secnum, figtype, fignode):
self.toc_fignumbers.setdefault(docname, {})
fignumbers = self.toc_fignumbers[docname].setdefault(figtype, {})
figure_id = fignode['ids'][0]
fignumbers[figure_id] = get_next_fignumber(figtype, secnum)
def _walk_doctree(docname, doctree, secnum):
for subnode in doctree.children:
if isinstance(subnode, nodes.section):
next_secnum = get_section_number(docname, subnode)
if next_secnum:
_walk_doctree(docname, subnode, next_secnum)
else:
_walk_doctree(docname, subnode, secnum)
continue
elif isinstance(subnode, addnodes.toctree):
for title, subdocname in subnode['entries']:
if url_re.match(subdocname) or subdocname == 'self':
# don't mess with those
continue
_walk_doc(subdocname, secnum)
continue
figtype = self.env.domains['std'].get_figtype(subnode)
if figtype and subnode['ids']:
register_fignumber(docname, secnum, figtype, subnode)
_walk_doctree(docname, subnode, secnum)
def _walk_doc(docname, secnum):
if docname not in assigned:
assigned.add(docname)
doctree = self.env.get_doctree(docname)
_walk_doctree(docname, doctree, secnum)
if self.env.config.numfig:
_walk_doc(self.env.config.master_doc, tuple())
for docname, fignums in iteritems(self.toc_fignumbers):
if fignums != old_fignumbers.get(docname):
rewrite_needed.append(docname)
return rewrite_needed
| bgris/ODL_bgris | lib/python3.5/site-packages/Sphinx-1.5.1-py3.5.egg/sphinx/environment/managers/toctree.py | Python | gpl-3.0 | 25,403 |
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.template.defaultfilters import register
from appointment.constants import EVENT_STATUS, ALARM_STATUS, ALARM_METHOD
@register.filter(name='event_status')
def event_status(value):
"""Event Status Templatetag"""
if not value:
return ''
STATUS = dict(EVENT_STATUS)
try:
return STATUS[value].encode('utf-8')
except:
return ''
@register.filter(name='alarm_status')
def alarm_status(value):
"""Alarm Status Templatetag"""
if not value:
return ''
STATUS = dict(ALARM_STATUS)
try:
return STATUS[value].encode('utf-8')
except:
return ''
@register.filter(name='alarm_method')
def alarm_method(value):
"""Alarm Method Templatetag"""
if not value:
return ''
METHOD = dict(ALARM_METHOD)
try:
return METHOD[value].encode('utf-8')
except:
return ''
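# Illustrative template usage (the context variable names are hypothetical):
# {{ event.status|event_status }}, {{ alarm.status|alarm_status }},
# {{ alarm.method|alarm_method }}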
| gale320/newfies-dialer | newfies/appointment/templatetags/appointment_tags.py | Python | mpl-2.0 | 1,283 |
#-*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <[email protected]>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from datetime import datetime, date, timedelta
from dateutil.relativedelta import relativedelta
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
from openerp.tools.translate import _
class hr_employee(osv.Model):
_inherit = 'hr.employee'
def _get_contracts_list(self, employee):
'''Return list of contracts in chronological order'''
contracts = []
for c in employee.contract_ids:
l = len(contracts)
if l == 0:
contracts.append(c)
else:
dCStart = datetime.strptime(c.date_start, OE_DATEFORMAT).date()
i = l - 1
while i >= 0:
dContractStart = datetime.strptime(
contracts[i].date_start, OE_DATEFORMAT).date()
if dContractStart < dCStart:
contracts = contracts[:i + 1] + [c] + contracts[i + 1:]
break
elif i == 0:
contracts = [c] + contracts
i -= 1
return contracts
def _get_days_in_month(self, d):
last_date = d - timedelta(days=(d.day - 1)) + relativedelta(
months= +1) + relativedelta(days= -1)
return last_date.day
def get_months_service_to_date(self, cr, uid, ids, dToday=None, context=None):
'''Return a dictionary keyed by employee id. The value is a tuple:
(number of months of employment as a float, initial employment date).'''
res = dict.fromkeys(ids, 0)
if dToday == None:
dToday = date.today()
for ee in self.pool.get('hr.employee').browse(cr, uid, ids, context=context):
delta = relativedelta(dToday, dToday)
contracts = self._get_contracts_list(ee)
if len(contracts) == 0:
res[ee.id] = (0.0, False)
continue
dInitial = datetime.strptime(
contracts[0].date_start, OE_DATEFORMAT).date()
if ee.initial_employment_date:
dFirstContract = dInitial
dInitial = datetime.strptime(
ee.initial_employment_date, '%Y-%m-%d').date()
if dFirstContract < dInitial:
raise osv.except_osv(_('Employment Date mismatch!'),
_('The initial employment date cannot be after the first contract in the system.\nEmployee: %s') % ee.name)
delta = relativedelta(dFirstContract, dInitial)
for c in contracts:
dStart = datetime.strptime(c.date_start, '%Y-%m-%d').date()
if dStart >= dToday:
continue
# If the contract doesn't have an end date, use today's date
# If the contract has finished consider the entire duration of
# the contract, otherwise consider only the months in the
# contract until today.
#
if c.date_end:
dEnd = datetime.strptime(c.date_end, '%Y-%m-%d').date()
else:
dEnd = dToday
if dEnd > dToday:
dEnd = dToday
delta += relativedelta(dEnd, dStart)
# Set the number of months the employee has worked
date_part = float(delta.days) / float(
self._get_days_in_month(dInitial))
res[ee.id] = (
float((delta.years * 12) + delta.months) + date_part, dInitial)
return res
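# Illustrative result: an employee hired on 2012-01-01 and still employed 18
# months later would yield roughly res == {employee_id: (18.0, date(2012, 1, 1))}.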
def _get_employed_months(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0.0)
_res = self.get_months_service_to_date(cr, uid, ids, context=context)
for k, v in _res.iteritems():
res[k] = v[0]
return res
def _search_amount(self, cr, uid, obj, name, args, context):
ids = set()
for cond in args:
amount = cond[2]
if isinstance(cond[2], (list, tuple)):
if cond[1] in ['in', 'not in']:
amount = tuple(cond[2])
else:
continue
else:
if cond[1] in ['=like', 'like', 'not like', 'ilike', 'not ilike', 'in', 'not in', 'child_of']:
continue
cr.execute("select id from hr_employee having %s %%s" %
(cond[1]), (amount,))
res_ids = set(id[0] for id in cr.fetchall())
ids = ids and (ids & res_ids) or res_ids
if ids:
return [('id', 'in', tuple(ids))]
return [('id', '=', '0')]
_columns = {
'initial_employment_date': fields.date('Initial Date of Employment', groups=False,
help='Date of first employment if it was before the start of the first contract in the system.'),
'length_of_service': fields.function(_get_employed_months, type='float', method=True,
groups=False,
string='Length of Service'),
}
| bwrsandman/openerp-hr | hr_employee_seniority/hr.py | Python | agpl-3.0 | 5,976 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date, datetime
from dateutil import relativedelta
import json
import time
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare, float_round
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from openerp.exceptions import Warning
from openerp import SUPERUSER_ID, api
import openerp.addons.decimal_precision as dp
from openerp.addons.procurement import procurement
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."),
}
_defaults = {
'active': True,
}
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Inventory Locations"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
_rec_name = 'complete_name'
def _location_owner(self, cr, uid, location, context=None):
''' Return the company owning the location if any '''
return location and (location.usage == 'internal') and location.company_id or False
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.location_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.location_id
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
if context is None:
context = {}
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive)
def _name_get(self, cr, uid, location, context=None):
name = location.name
while location.location_id and location.usage != 'view':
location = location.location_id
name = location.name + '/' + name
return name
def name_get(self, cr, uid, ids, context=None):
res = []
for location in self.browse(cr, uid, ids, context=context):
res.append((location.id, self._name_get(cr, uid, location, context=context)))
return res
_columns = {
'name': fields.char('Location Name', required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([
('supplier', 'Supplier Location'),
('view', 'View'),
('internal', 'Internal Location'),
('customer', 'Customer Location'),
('inventory', 'Inventory'),
('procurement', 'Procurement'),
('production', 'Production'),
('transit', 'Transit Location')],
'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
\n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations
""", select=True),
'complete_name': fields.function(_complete_name, type='char', string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this location is shared between companies'),
'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."),
'loc_barcode': fields.char('Location Barcode'),
}
_defaults = {
'active': True,
'usage': 'internal',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'scrap_location': False,
}
    _sql_constraints = [('loc_barcode_company_uniq', 'unique (loc_barcode,company_id)', 'The barcode for a location must be unique per company!')]
def create(self, cr, uid, default, context=None):
if not default.get('loc_barcode', False):
default.update({'loc_barcode': default.get('complete_name', False)})
return super(stock_location, self).create(cr, uid, default, context=context)
def get_putaway_strategy(self, cr, uid, location, product, context=None):
''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.'''
putaway_obj = self.pool.get('product.putaway')
loc = location
while loc:
if loc.putaway_strategy_id:
res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context)
if res:
return res
loc = loc.location_id
def _default_removal_strategy(self, cr, uid, context=None):
return 'fifo'
def get_removal_strategy(self, cr, uid, location, product, context=None):
''' Returns the removal strategy to consider for the given product and location.
:param location: browse record (stock.location)
:param product: browse record (product.product)
:rtype: char
'''
if product.categ_id.removal_strategy_id:
return product.categ_id.removal_strategy_id.method
loc = location
while loc:
if loc.removal_strategy_id:
return loc.removal_strategy_id.method
loc = loc.location_id
return self._default_removal_strategy(cr, uid, context=context)
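    # Resolution order: the strategy on the product's category wins, then the first strategy
    # found while walking up the location chain, and finally the 'fifo' default above.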
def get_warehouse(self, cr, uid, location, context=None):
"""
        Returns the id of the warehouse that contains the location
:param location: browse record (stock.location)
"""
wh_obj = self.pool.get("stock.warehouse")
whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left),
('view_location_id.parent_right', '>=', location.parent_left)], context=context)
return whs and whs[0] or False
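    # The search relies on the nested-set values maintained by _parent_store: a location lies
    # inside a warehouse's view location when its parent_left falls between that view
    # location's parent_left and parent_right.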
#----------------------------------------------------------
# Routes
#----------------------------------------------------------
class stock_location_route(osv.osv):
_name = 'stock.location.route'
_description = "Inventory Routes"
_order = 'sequence'
_columns = {
'name': fields.char('Route Name', required=True),
'sequence': fields.integer('Sequence'),
'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Pull Rules', copy=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."),
'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True),
'product_selectable': fields.boolean('Applicable on Product'),
'product_categ_selectable': fields.boolean('Applicable on Product Category'),
'warehouse_selectable': fields.boolean('Applicable on Warehouse'),
'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'),
'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplier Warehouse'),
        'company_id': fields.many2one('res.company', 'Company', select=1, help='Leave this field empty if this route is shared between all companies'),
}
_defaults = {
'sequence': lambda self, cr, uid, ctx: 0,
'active': True,
'product_selectable': True,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c),
}
def write(self, cr, uid, ids, vals, context=None):
        '''When a route is activated or deactivated, apply the same change to its pull and push rules'''
if isinstance(ids, (int, long)):
ids = [ids]
res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context)
if 'active' in vals:
push_ids = []
pull_ids = []
for route in self.browse(cr, uid, ids, context=context):
if route.push_ids:
push_ids += [r.id for r in route.push_ids if r.active != vals['active']]
if route.pull_ids:
pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']]
if push_ids:
self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context)
if pull_ids:
self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context)
return res
#----------------------------------------------------------
# Quants
#----------------------------------------------------------
class stock_quant(osv.osv):
"""
    Quants are the smallest units of physical stock: each quant represents a quantity of a product at a given location
"""
_name = "stock.quant"
_description = "Quants"
def _get_quant_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for q in self.browse(cr, uid, ids, context=context):
res[q.id] = q.product_id.code or ''
if q.lot_id:
res[q.id] = q.lot_id.name
res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name
return res
def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None):
context = dict(context or {})
res = {}
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for quant in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if quant.company_id.id != uid_company_id:
#if the company of the quant is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = quant.company_id.id
quant = self.browse(cr, uid, quant.id, context=context)
res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context)
return res
def _get_inventory_value(self, cr, uid, quant, context=None):
return quant.product_id.standard_price * quant.qty
_columns = {
'name': fields.function(_get_quant_name, type='char', string='Identifier'),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True),
'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True),
'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True),
'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True),
'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True),
'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True, ondelete="restrict"),
'cost': fields.float('Unit Cost'),
'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True),
'create_date': fields.datetime('Creation Date', readonly=True),
'in_date': fields.datetime('Incoming Date', readonly=True, select=True),
'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant', copy=False),
'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True),
'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True),
# Used for negative quants to reconcile after compensated by a new positive one
'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True),
'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True),
'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True,
help="Technical field used to record the destination location of a move that created a negative quant"),
}
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c),
}
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)')
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
''' Overwrite the read_group in order to sum the function field 'inventory_value' in group by'''
res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
if 'inventory_value' in fields:
for line in res:
if '__domain' in line:
lines = self.search(cr, uid, line['__domain'], context=context)
inv_value = 0.0
for line2 in self.browse(cr, uid, lines, context=context):
inv_value += line2.inventory_value
line['inventory_value'] = inv_value
return res
def action_view_quant_history(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the history of the quant, i.e.
        all the stock moves that led to this quant's creation, with this quant's quantity.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context={})[0]
move_ids = []
for quant in self.browse(cr, uid, ids, context=context):
move_ids += [move.id for move in quant.history_ids]
result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]"
return result
def quants_reserve(self, cr, uid, quants, move, link=False, context=None):
'''This function reserves quants for the given move (and optionally given link). If the total of quantity reserved is enough, the move's state
is also set to 'assigned'
:param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored. Negative quants should not be received as argument
:param move: browse record
:param link: browse record (stock.move.operation.link)
'''
toreserve = []
reserved_availability = move.reserved_availability
#split quants if needed
for quant, qty in quants:
if qty <= 0.0 or (quant and quant.qty <= 0.0):
raise osv.except_osv(_('Error!'), _('You can not reserve a negative quantity or a negative quant.'))
if not quant:
continue
self._quant_split(cr, uid, quant, qty, context=context)
toreserve.append(quant.id)
reserved_availability += quant.qty
#reserve quants
if toreserve:
self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context)
            #if the move has a picking_id, flag on that picking that pack operations might have changed and need to be recomputed
if move.picking_id:
self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
        #check if the move's state needs to be set to 'assigned'
rounding = move.product_id.uom_id.rounding
if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting') :
self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context)
elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available:
self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context)
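    # Usage sketch (hypothetical records, assuming quant_a holds at least 3.0 units):
    #   self.quants_reserve(cr, uid, [(quant_a, 3.0)], move)
    # splits quant_a so that exactly 3.0 units carry move.id as reservation_id, then marks the
    # move 'assigned' or 'partially_available' depending on the total reserved quantity.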
def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, context=None):
"""Moves all given stock.quant in the given destination location. Unreserve from current move.
:param quants: list of tuple(browse record(stock.quant) or None, quantity to move)
:param move: browse record (stock.move)
:param location_to: browse record (stock.location) depicting where the quants have to be moved
:param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy applied). This parameter is only used to pass to _quant_create if a negative quant must be created
:param lot_id: ID of the lot that must be set on the quants to move
:param owner_id: ID of the partner that must own the quants to move
:param src_package_id: ID of the package that contains the quants to move
:param dest_package_id: ID of the package that must be set on the moved quant
"""
quants_reconcile = []
to_move_quants = []
self._check_location(cr, uid, location_to, context=context)
for quant, qty in quants:
if not quant:
#If quant is None, we will create a quant to move (and potentially a negative counterpart too)
quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context)
else:
self._quant_split(cr, uid, quant, qty, context=context)
to_move_quants.append(quant)
quants_reconcile.append(quant)
if to_move_quants:
to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id]
self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, context=context)
self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context)
if location_to.usage == 'internal':
# Do manual search for quant to avoid full table scan (order by id)
cr.execute("""
SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND
((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1
""", (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id))
if cr.fetchone():
for quant in quants_reconcile:
self._quant_reconcile_negative(cr, uid, quant, move, context=context)
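    # A (None, qty) tuple in `quants` means no existing quant was found: _quant_create then
    # builds a new quant in the destination and, if the source is an internal location, a
    # negative counterpart that _quant_reconcile_negative will try to resolve later.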
def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None):
context=context or {}
vals = {'location_id': location_dest_id.id,
'history_ids': [(4, move.id)],
'reservation_id': False}
if not context.get('entire_pack'):
vals.update({'package_id': dest_package_id})
self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context)
def quants_get_prefered_domain(self, cr, uid, location, product, qty, domain=None, prefered_domain_list=[], restrict_lot_id=False, restrict_partner_id=False, context=None):
''' This function tries to find quants in the given location for the given domain, by trying to first limit
the choice on the quants that match the first item of prefered_domain_list as well. But if the qty requested is not reached
it tries to find the remaining quantity by looping on the prefered_domain_list (tries with the second item and so on).
Make sure the quants aren't found twice => all the domains of prefered_domain_list should be orthogonal
'''
if domain is None:
domain = []
quants = [(None, qty)]
#don't look for quants in location that are of type production, supplier or inventory.
if location.usage in ['inventory', 'production', 'supplier']:
return quants
res_qty = qty
if not prefered_domain_list:
return self.quants_get(cr, uid, location, product, qty, domain=domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
for prefered_domain in prefered_domain_list:
res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding)
if res_qty_cmp > 0:
#try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the prefered order
quants.pop()
tmp_quants = self.quants_get(cr, uid, location, product, res_qty, domain=domain + prefered_domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context)
for quant in tmp_quants:
if quant[0]:
res_qty -= quant[1]
quants += tmp_quants
return quants
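    # e.g. (hypothetical domains): with prefered_domain_list =
    #   [[('reservation_id', '=', False)], [('reservation_id', '!=', False)]]
    # unreserved quants are consumed first and already-reserved ones only cover whatever
    # quantity is still missing; the domains must be orthogonal or quants could be counted twice.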
def quants_get(self, cr, uid, location, product, qty, domain=None, restrict_lot_id=False, restrict_partner_id=False, context=None):
"""
Use the removal strategies of product to search for the correct quants
If you inherit, put the super at the end of your method.
:location: browse record of the parent location where the quants have to be found
:product: browse record of the product to find
:qty in UoM of product
"""
result = []
domain = domain or [('qty', '>', 0.0)]
if restrict_partner_id:
domain += [('owner_id', '=', restrict_partner_id)]
if restrict_lot_id:
domain += [('lot_id', '=', restrict_lot_id)]
if location:
removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, location, product, context=context)
result += self.apply_removal_strategy(cr, uid, location, product, qty, domain, removal_strategy, context=context)
return result
def apply_removal_strategy(self, cr, uid, location, product, quantity, domain, removal_strategy, context=None):
if removal_strategy == 'fifo':
order = 'in_date, id'
return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
elif removal_strategy == 'lifo':
order = 'in_date desc, id desc'
return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context)
        raise osv.except_osv(_('Error!'), _('Removal strategy %s not implemented.') % (removal_strategy,))
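    # 'fifo' orders candidate quants by 'in_date, id' (oldest stock first) while 'lifo' uses
    # 'in_date desc, id desc' (newest first); any other strategy name raises, so additional
    # strategies require overriding this method.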
def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False,
force_location_from=False, force_location_to=False, context=None):
'''Create a quant in the destination location and create a negative quant in the source location if it's an internal location.
'''
if context is None:
context = {}
price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context)
location = force_location_to or move.location_dest_id
rounding = move.product_id.uom_id.rounding
vals = {
'product_id': move.product_id.id,
'location_id': location.id,
'qty': float_round(qty, precision_rounding=rounding),
'cost': price_unit,
'history_ids': [(4, move.id)],
'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': move.company_id.id,
'lot_id': lot_id,
'owner_id': owner_id,
'package_id': dest_package_id,
}
if move.location_id.usage == 'internal':
#if we were trying to move something from an internal location and reach here (quant creation),
#it means that a negative quant has to be created as well.
negative_vals = vals.copy()
negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id
negative_vals['qty'] = float_round(-qty, precision_rounding=rounding)
negative_vals['cost'] = price_unit
negative_vals['negative_move_id'] = move.id
negative_vals['package_id'] = src_package_id
negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context)
vals.update({'propagated_from_id': negative_quant_id})
#create the quant as superuser, because we want to restrict the creation of quant manually: we should always use this method to create quants
quant_id = self.create(cr, SUPERUSER_ID, vals, context=context)
return self.browse(cr, uid, quant_id, context=context)
def _quant_split(self, cr, uid, quant, qty, context=None):
context = context or {}
rounding = quant.product_id.uom_id.rounding
if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0: # if quant <= qty in abs, take it entirely
return False
qty_round = float_round(qty, precision_rounding=rounding)
new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding)
# Fetch the history_ids manually as it will not do a join with the stock moves then (=> a lot faster)
cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,))
res = cr.fetchall()
new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context)
self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context)
return self.browse(cr, uid, new_quant, context=context)
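    # e.g. (hypothetical quantities): splitting a 10.0-unit quant with qty=4.0 rewrites the
    # original quant to 4.0 and returns a copy holding the remaining 6.0 units with the same
    # move history; if qty covers the whole quant, False is returned and it is taken entirely.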
def _get_latest_move(self, cr, uid, quant, context=None):
move = False
for m in quant.history_ids:
if not move or m.date > move.date:
move = m
return move
@api.cr_uid_ids_context
def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None):
path = []
for move in solving_quant.history_ids:
path.append((4, move.id))
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context)
def _quant_reconcile_negative(self, cr, uid, quant, move, context=None):
"""
        When a new quant arrives in a location, try to reconcile it with
        negative quants. If possible, apply the cost of the new
        quant to the counterpart of the negative quant.
"""
solving_quant = quant
dom = [('qty', '<', 0)]
if quant.lot_id:
dom += [('lot_id', '=', quant.lot_id.id)]
dom += [('owner_id', '=', quant.owner_id.id)]
dom += [('package_id', '=', quant.package_id.id)]
dom += [('id', '!=', quant.propagated_from_id.id)]
quants = self.quants_get(cr, uid, quant.location_id, quant.product_id, quant.qty, dom, context=context)
product_uom_rounding = quant.product_id.uom_id.rounding
for quant_neg, qty in quants:
if not quant_neg or not solving_quant:
continue
to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context)
if not to_solve_quant_ids:
continue
solving_qty = qty
solved_quant_ids = []
for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context):
if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0:
continue
solved_quant_ids.append(to_solve_quant.id)
self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context)
solving_qty -= min(solving_qty, to_solve_quant.qty)
remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context)
remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context)
#if the reconciliation was not complete, we need to link together the remaining parts
if remaining_neg_quant:
remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context)
if remaining_to_solve_quant_ids:
self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context)
if solving_quant.propagated_from_id and solved_quant_ids:
self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context)
            #delete the reconciled negative quant, as it is replaced by the solved quants
self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context)
if solved_quant_ids:
#price update + accounting entries adjustments
self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context)
#merge history (and cost?)
self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context)
self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context)
solving_quant = remaining_solving_quant
def _price_update(self, cr, uid, ids, newprice, context=None):
self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context)
def quants_unreserve(self, cr, uid, move, context=None):
related_quants = [x.id for x in move.reserved_quant_ids]
if related_quants:
            #if the move has a picking_id, flag on that picking that pack operations might have changed and need to be recomputed
if move.picking_id:
self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context)
if move.partially_available:
self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context)
self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context)
def _quants_get_order(self, cr, uid, location, product, quantity, domain=[], orderby='in_date', context=None):
''' Implementation of removal strategies
            If the full quantity cannot be reserved, the result ends with a tuple (None, remaining_qty)
'''
if context is None:
context = {}
domain += location and [('location_id', 'child_of', location.id)] or []
domain += [('product_id', '=', product.id)]
if context.get('force_company'):
domain += [('company_id', '=', context.get('force_company'))]
else:
domain += [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)]
res = []
offset = 0
while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0:
quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context)
if not quants:
res.append((None, quantity))
break
for quant in self.browse(cr, uid, quants, context=context):
rounding = product.uom_id.rounding
if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0:
res += [(quant, abs(quant.qty))]
quantity -= abs(quant.qty)
elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0:
res += [(quant, quantity)]
quantity = 0
break
offset += 10
return res
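    # Candidate quants are fetched in batches of 10 following the removal order; if the domain
    # runs out before `quantity` is satisfied, the remainder is returned as a trailing
    # (None, quantity) tuple, which callers either skip (reservation) or turn into a newly
    # created quant (move).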
def _check_location(self, cr, uid, location, context=None):
if location.usage == 'view':
raise osv.except_osv(_('Error'), _('You cannot move to a location of type view %s.') % (location.name))
return True
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Picking List"
_order = "priority desc, date asc, id desc"
def _set_min_date(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context)
def _set_priority(self, cr, uid, id, field, value, arg, context=None):
move_obj = self.pool.get("stock.move")
if value:
move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines]
move_obj.write(cr, uid, move_ids, {'priority': value}, context=context)
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False, 'priority': '1'}
if not ids:
return res
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected),
max(priority)
from
stock_move
where
picking_id IN %s
group by
picking_id""", (tuple(ids),))
for pick, dt1, dt2, prio in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
res[pick]['priority'] = prio
return res
def create(self, cr, user, vals, context=None):
context = context or {}
if ('name' not in vals) or (vals.get('name') in ('/', False)):
ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False))
sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id
vals['name'] = self.pool.get('ir.sequence').get_id(cr, user, sequence_id, 'id', context=context)
return super(stock_picking, self).create(cr, user, vals, context)
def _state_get(self, cr, uid, ids, field_name, arg, context=None):
'''The state of a picking depends on the state of its related stock.move
        draft: the picking has no lines, or at least one of the lines is draft
done, draft, cancel: all lines are done / draft / cancel
confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial)
'''
res = {}
for pick in self.browse(cr, uid, ids, context=context):
if (not pick.move_lines) or any([x.state == 'draft' for x in pick.move_lines]):
res[pick.id] = 'draft'
continue
if all([x.state == 'cancel' for x in pick.move_lines]):
res[pick.id] = 'cancel'
continue
if all([x.state in ('cancel', 'done') for x in pick.move_lines]):
res[pick.id] = 'done'
continue
order = {'confirmed': 0, 'waiting': 1, 'assigned': 2}
order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'}
lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')]
if pick.move_type == 'one':
res[pick.id] = order_inv[min(lst)]
else:
                    #we are in the case of partial delivery, so if all moves are assigned, the picking
                    #should be assigned too; else if one of the moves is assigned or partially available,
                    #the picking should be in the partially available state; otherwise it is waiting or confirmed
res[pick.id] = order_inv[max(lst)]
if not all(x == 2 for x in lst):
if any(x == 2 for x in lst):
res[pick.id] = 'partially_available'
else:
#if all moves aren't assigned, check if we have one product partially available
for move in pick.move_lines:
if move.partially_available:
res[pick.id] = 'partially_available'
break
return res
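    # e.g. (hypothetical picking): with move_type 'one' and moves in states ('assigned',
    # 'confirmed'), min(lst) wins and the picking is 'confirmed'; with move_type 'direct' the
    # same moves yield 'partially_available' because at least one move is already assigned.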
def _get_pickings(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id:
res.add(move.picking_id.id)
return list(res)
def _get_pickings_dates_priority(self, cr, uid, ids, context=None):
res = set()
for move in self.browse(cr, uid, ids, context=context):
if move.picking_id and (not (move.picking_id.min_date < move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority):
res.add(move.picking_id.id)
return list(res)
def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
if pick.pack_operation_ids:
res[pick.id] = True
return res
def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for pick in self.browse(cr, uid, ids, context=context):
res[pick.id] = False
for move in pick.move_lines:
if move.reserved_quant_ids:
res[pick.id] = True
continue
return res
def check_group_lot(self, cr, uid, context=None):
""" This function will return true if we have the setting to use lots activated. """
return self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot')
def check_group_pack(self, cr, uid, context=None):
""" This function will return true if we have the setting to use package activated. """
return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot')
def action_assign_owner(self, cr, uid, ids, context=None):
for picking in self.browse(cr, uid, ids, context=context):
packop_ids = [op.id for op in picking.pack_operation_ids]
self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context)
_columns = {
'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False),
'note': fields.text('Notes'),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies whether goods are to be delivered partially or all at once"),
'state': fields.function(_state_get, type="selection", copy=False,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type'], 20),
'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)},
selection=[
('draft', 'Draft'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('partially_available', 'Partially Available'),
('assigned', 'Ready to Transfer'),
('done', 'Transferred'),
], string='Status', readonly=True, select=True, track_visibility='onchange',
help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Partially Available: some products are available and reserved\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority',
store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. Setting manually a value here would set it as priority for all the moves",
track_visibility='onchange', required=True),
'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date,
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'),
'max_date': fields.function(get_min_max_date, multi="min_max_date",
store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'),
'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=True),
'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Quant already reserved ?', help='technical field used to know if there is already at least one quant reserved on moves of a given picking'),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'),
'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Pack Operation Exists?', help='technical field for attrs in view'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True),
'picking_type_code': fields.related('picking_type_id', 'code', type='char', string='Picking Type Code', help="Technical field used to display the correct label on print button in the picking view"),
'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"),
# Used to search on pickings
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
        'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which means we might need to recompute the package operations', copy=False),
'location_id': fields.related('move_lines', 'location_id', type='many2one', relation='stock.location', string='Location', readonly=True),
'location_dest_id': fields.related('move_lines', 'location_dest_id', type='many2one', relation='stock.location', string='Destination Location', readonly=True),
'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', readonly=True,
store={
'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10),
'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10),
}),
}
_defaults = {
'name': '/',
'state': 'draft',
'move_type': 'direct',
'priority': '1', # normal
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c),
'recompute_pack_op': True,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'),
]
def do_print_picking(self, cr, uid, ids, context=None):
'''This function prints the picking list'''
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context)
def action_confirm(self, cr, uid, ids, context=None):
todo = []
todo_force_assign = []
for picking in self.browse(cr, uid, ids, context=context):
if picking.location_id.usage in ('supplier', 'inventory', 'production'):
todo_force_assign.append(picking.id)
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
if todo_force_assign:
self.force_assign(cr, uid, todo_force_assign, context=context)
return True
def action_assign(self, cr, uid, ids, context=None):
""" Check availability of picking moves.
This has the effect of changing the state and reserve quants on available moves, and may
also impact the state of the picking as it is computed based on move's states.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
if pick.state == 'draft':
self.action_confirm(cr, uid, [pick.id], context=context)
#skip the moves that don't need to be checked
move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')]
if not move_ids:
raise osv.except_osv(_('Warning!'), _('Nothing to check the availability for.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context)
return True
def force_assign(self, cr, uid, ids, context=None):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context)
#pack_operation might have changed and need to be recomputed
self.write(cr, uid, ids, {'recompute_pack_op': True}, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
return True
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done by processing the Stock Moves of the Picking
Normally that happens when the button "Done" is pressed on a Picking view.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context))
elif move.state in ('assigned', 'confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
#on picking deletion, cancel its move then unlink them too
move_obj = self.pool.get('stock.move')
context = context or {}
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [move.id for move in pick.move_lines]
move_obj.action_cancel(cr, uid, move_ids, context=context)
move_obj.unlink(cr, uid, move_ids, context=context)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('move_lines') and not vals.get('pack_operation_ids'):
            # pack operations directly depend on move lines, so they need to be recomputed
pack_operation_obj = self.pool['stock.pack.operation']
existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', ids)], context=context)
if existing_package_ids:
pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
res = super(stock_picking, self).write(cr, uid, ids, vals, context=context)
#if we changed the move lines or the pack operations, we need to recompute the remaining quantities of both
if 'move_lines' in vals or 'pack_operation_ids' in vals:
self.do_recompute_remaining_quantities(cr, uid, ids, context=context)
return res
def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None):
""" Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines.
"""
if not backorder_moves:
backorder_moves = picking.move_lines
backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')]
if 'do_only_split' in context and context['do_only_split']:
backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])]
if backorder_move_ids:
backorder_id = self.copy(cr, uid, picking.id, {
'name': '/',
'move_lines': [],
'pack_operation_ids': [],
'backorder_id': picking.id,
})
backorder = self.browse(cr, uid, backorder_id, context=context)
self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context)
move_obj = self.pool.get("stock.move")
move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context)
self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.action_confirm(cr, uid, [backorder_id], context=context)
return backorder_id
return False
@api.cr_uid_ids_context
def recheck_availability(self, cr, uid, picking_ids, context=None):
self.action_assign(cr, uid, picking_ids, context=context)
self.do_prepare_partial(cr, uid, picking_ids, context=context)
def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None):
"""This method searches for the higher level packages that can be moved as a single operation, given a list of quants
to move and their suggested destination, and returns the list of matching packages.
"""
# Try to find as much as possible top-level packages that can be moved
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
top_lvl_packages = set()
quants_to_compare = quants_suggested_locations.keys()
for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])):
loop = True
test_pack = pack
good_pack = False
pack_destination = False
while loop:
pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context)
all_in = True
for quant in quant_obj.browse(cr, uid, pack_quants, context=context):
# If the quant is not in the quants to compare and not in the common location
if not quant in quants_to_compare:
all_in = False
break
else:
                        #if a putaway strategy applies, the destination location of each quant may be different (and thus the package should not be taken as a single operation)
if not pack_destination:
pack_destination = quants_suggested_locations[quant]
elif pack_destination != quants_suggested_locations[quant]:
all_in = False
break
if all_in:
good_pack = test_pack
if test_pack.parent_id:
test_pack = test_pack.parent_id
else:
#stop the loop when there's no parent package anymore
loop = False
else:
#stop the loop when the package test_pack is not totally reserved for moves of this picking
#(some quants may be reserved for other picking or not reserved at all)
loop = False
if good_pack:
top_lvl_packages.add(good_pack)
return list(top_lvl_packages)
def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None):
""" returns a list of dict, ready to be used in create() of stock.pack.operation.
:param picking: browse record (stock.picking)
:param quants: browse record list (stock.quant). List of quants associated to the picking
:param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking
"""
def _picking_putaway_apply(product):
location = False
# Search putaway strategy
if product_putaway_strats.get(product.id):
location = product_putaway_strats[product.id]
else:
location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context)
product_putaway_strats[product.id] = location
return location or picking.location_dest_id.id
        # If we encounter a UoM that is smaller than the default UoM or the one already chosen, use the new one instead.
product_uom = {} # Determines UoM used in pack operations
location_dest_id = None
location_id = None
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if not product_uom.get(move.product_id.id):
product_uom[move.product_id.id] = move.product_id.uom_id
if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor:
product_uom[move.product_id.id] = move.product_uom
if not move.scrapped:
if location_dest_id and move.location_dest_id.id != location_dest_id:
raise Warning(_('The destination location must be the same for all the moves of the picking.'))
location_dest_id = move.location_dest_id.id
if location_id and move.location_id.id != location_id:
raise Warning(_('The source location must be the same for all the moves of the picking.'))
location_id = move.location_id.id
pack_obj = self.pool.get("stock.quant.package")
quant_obj = self.pool.get("stock.quant")
vals = []
qtys_grouped = {}
#for each quant of the picking, find the suggested location
quants_suggested_locations = {}
product_putaway_strats = {}
for quant in quants:
if quant.qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(quant.product_id)
quants_suggested_locations[quant] = suggested_location_id
        #find the packages we can move as a whole
top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context)
# and then create pack operations for the top-level packages found
for pack in top_lvl_packages:
pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context)
pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context)
vals.append({
'picking_id': picking.id,
'package_id': pack.id,
'product_qty': 1.0,
'location_id': pack.location_id.id,
'location_dest_id': quants_suggested_locations[pack_quants[0]],
'owner_id': pack.owner_id.id,
})
#remove the quants inside the package so that they are excluded from the rest of the computation
for quant in pack_quants:
del quants_suggested_locations[quant]
# Go through all remaining reserved quants and group by product, package, lot, owner, source location and dest location
for quant, dest_location_id in quants_suggested_locations.items():
key = (quant.product_id.id, quant.package_id.id, quant.lot_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += quant.qty
else:
qtys_grouped[key] = quant.qty
        # Do the same for the forced quantities (in case of force_assign or incoming shipment, for example)
for product, qty in forced_qties.items():
if qty <= 0:
continue
suggested_location_id = _picking_putaway_apply(product)
key = (product.id, False, False, picking.owner_id.id, picking.location_id.id, suggested_location_id)
if qtys_grouped.get(key):
qtys_grouped[key] += qty
else:
qtys_grouped[key] = qty
# Create the necessary operations for the grouped quants and remaining qtys
uom_obj = self.pool.get('product.uom')
prevals = {}
for key, qty in qtys_grouped.items():
product = self.pool.get("product.product").browse(cr, uid, key[0], context=context)
uom_id = product.uom_id.id
qty_uom = qty
if product_uom.get(key[0]):
uom_id = product_uom[key[0]].id
qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id)
val_dict = {
'picking_id': picking.id,
'product_qty': qty_uom,
'product_id': key[0],
'package_id': key[1],
'lot_id': key[2],
'owner_id': key[3],
'location_id': key[4],
'location_dest_id': key[5],
'product_uom_id': uom_id,
}
if key[0] in prevals:
prevals[key[0]].append(val_dict)
else:
prevals[key[0]] = [val_dict]
        # prevals holds the operations so they can be created in the same order as the picking stock moves, if possible
processed_products = set()
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if move.product_id.id not in processed_products:
vals += prevals.get(move.product_id.id, [])
processed_products.add(move.product_id.id)
return vals
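    # Quants left over after extracting whole packages are grouped by the key
    # (product, package, lot, owner, source location, destination location); one pack operation
    # is created per key, with the quantity expressed in the smallest UoM seen on that
    # product's moves (per the selection logic above).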
@api.cr_uid_ids_context
def open_barcode_interface(self, cr, uid, picking_ids, context=None):
final_url="/barcode/web/#action=stock.ui&picking_id="+str(picking_ids[0])
return {'type': 'ir.actions.act_url', 'url':final_url, 'target': 'self',}
@api.cr_uid_ids_context
def do_partial_open_barcode(self, cr, uid, picking_ids, context=None):
self.do_prepare_partial(cr, uid, picking_ids, context=context)
return self.open_barcode_interface(cr, uid, picking_ids, context=context)
@api.cr_uid_ids_context
def do_prepare_partial(self, cr, uid, picking_ids, context=None):
context = context or {}
pack_operation_obj = self.pool.get('stock.pack.operation')
#used to avoid recomputing the remaining quantities at each new pack operation created
ctx = context.copy()
ctx['no_recompute'] = True
#get list of existing operations and delete them
existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context)
if existing_package_ids:
pack_operation_obj.unlink(cr, uid, existing_package_ids, context)
for picking in self.browse(cr, uid, picking_ids, context=context):
forced_qties = {} # Quantity remaining after calculating reserved quants
picking_quants = []
#Calculate packages, reserved quants, qtys of this picking's moves
for move in picking.move_lines:
if move.state not in ('assigned', 'confirmed', 'waiting'):
continue
move_quants = move.reserved_quant_ids
picking_quants += move_quants
forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0
#if we used force_assign() on the move, or if the move is incoming, forced_qty > 0
if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0:
if forced_qties.get(move.product_id):
forced_qties[move.product_id] += forced_qty
else:
forced_qties[move.product_id] = forced_qty
for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context):
pack_operation_obj.create(cr, uid, vals, context=ctx)
#recompute the remaining quantities all at once
self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context)
self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context)
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, picking_ids, context=None):
"""
Will remove all quants for picking in picking_ids
"""
moves_to_unreserve = []
pack_line_to_unreserve = []
for picking in self.browse(cr, uid, picking_ids, context=context):
moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')]
pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids]
if moves_to_unreserve:
if pack_line_to_unreserve:
self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context)
self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context)
def recompute_remaining_qty(self, cr, uid, picking, context=None):
def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False):
move_dict = prod2move_ids[product_id][index]
qty_on_link = min(move_dict['remaining_qty'], qty_to_assign)
self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context)
if move_dict['remaining_qty'] == qty_on_link:
prod2move_ids[product_id].pop(index)
else:
move_dict['remaining_qty'] -= qty_on_link
return qty_on_link
def _create_link_for_quant(operation_id, quant, qty):
"""create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity"""
if not quant.reservation_id.id:
return _create_link_for_product(operation_id, quant.product_id.id, qty)
qty_on_link = 0
for i in range(0, len(prod2move_ids[quant.product_id.id])):
if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id:
continue
qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id)
break
return qty_on_link
def _create_link_for_product(operation_id, product_id, qty):
'''method that creates the link between a given operation and move(s) of given product, for the given quantity.
Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)'''
qty_to_assign = qty
prod_obj = self.pool.get("product.product")
product = prod_obj.browse(cr, uid, product_id)
rounding = product.uom_id.rounding
qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
if prod2move_ids.get(product_id):
while prod2move_ids[product_id] and qtyassign_cmp > 0:
qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False)
qty_to_assign -= qty_on_link
qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding)
return qtyassign_cmp == 0
uom_obj = self.pool.get('product.uom')
package_obj = self.pool.get('stock.quant.package')
quant_obj = self.pool.get('stock.quant')
link_obj = self.pool.get('stock.move.operation.link')
quants_in_package_done = set()
prod2move_ids = {}
still_to_do = []
#make a dictionary giving for each product, the moves and related quantity that can be used in operation links
for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]:
if not prod2move_ids.get(move.product_id.id):
prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}]
else:
prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty})
need_rereserve = False
#sort the operations in order to give higher priority to those with a package, then a serial number
operations = picking.pack_operation_ids
operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
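        #sort key weights: an entire package (package set, no product) adds -4, any package adds -2 and a lot adds -1,
        #so the ascending sort processes entire packages first, then operations with a package, then those with a lot, then the rest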
#delete existing operations to start again from scratch
links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context)
if links:
link_obj.unlink(cr, uid, links, context=context)
#1) first, try to create links when quants can be identified without any doubt
for ops in operations:
#for each operation, create the links with the stock move by seeking on the matching reserved quants,
            #and defer the operation if there is some ambiguity on the move to select
if ops.package_id and not ops.product_id:
#entire package
quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context)
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
remaining_qty_on_quant = quant.qty
if quant.reservation_id:
#avoid quants being counted twice
quants_in_package_done.add(quant.id)
qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty)
remaining_qty_on_quant -= qty_on_link
if remaining_qty_on_quant:
still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant))
need_rereserve = True
elif ops.product_id.id:
#Check moves with same product
qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
for move_dict in prod2move_ids.get(ops.product_id.id, []):
move = move_dict['move']
for quant in move.reserved_quant_ids:
if not qty_to_assign > 0:
break
if quant.id in quants_in_package_done:
continue
#check if the quant is matching the operation details
if ops.package_id:
flag = quant.package_id and bool(package_obj.search(cr, uid, [('id', 'child_of', [ops.package_id.id])], context=context)) or False
else:
flag = not quant.package_id.id
flag = flag and ((ops.lot_id and ops.lot_id.id == quant.lot_id.id) or not ops.lot_id)
flag = flag and (ops.owner_id.id == quant.owner_id.id)
if flag:
max_qty_on_link = min(quant.qty, qty_to_assign)
qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link)
qty_to_assign -= qty_on_link
qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=ops.product_id.uom_id.rounding)
if qty_assign_cmp > 0:
                    #qty reserved is less than qty put in operations. We need to create a link, but it's deferred until after we have processed
                    #all the quants (because they leave no choice on their related move and need to be processed with higher priority)
still_to_do += [(ops, ops.product_id.id, qty_to_assign)]
need_rereserve = True
#2) then, process the remaining part
all_op_processed = True
for ops, product_id, remaining_qty in still_to_do:
all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed
return (need_rereserve, all_op_processed)
def picking_recompute_remaining_quantities(self, cr, uid, picking, context=None):
need_rereserve = False
all_op_processed = True
if picking.pack_operation_ids:
need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, context=context)
return need_rereserve, all_op_processed
@api.cr_uid_ids_context
def do_recompute_remaining_quantities(self, cr, uid, picking_ids, context=None):
for picking in self.browse(cr, uid, picking_ids, context=context):
if picking.pack_operation_ids:
self.recompute_remaining_qty(cr, uid, picking, context=context)
def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None):
"""
Creates an extra move when there is no corresponding original move to be copied
"""
uom_obj = self.pool.get("product.uom")
uom_id = product.uom_id.id
qty = remaining_qty
if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id:
            if op.product_uom_id.factor > product.uom_id.factor: #If the pack operation's UoM is a smaller unit
uom_id = op.product_uom_id.id
                #HALF-UP rounding, as any rounding error will only come from the propagation of the error from the default UoM
qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP')
picking = op.picking_id
ref = product.default_code
        name = ('[' + ref + '] ' + product.name) if ref else product.name
res = {
'picking_id': picking.id,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'product_id': product.id,
'product_uom': uom_id,
'product_uom_qty': qty,
'name': _('Extra Move: ') + name,
'state': 'draft',
            'restrict_partner_id': op.owner_id.id,
}
return res
def _create_extra_moves(self, cr, uid, picking, context=None):
'''This function creates move lines on a picking, at the time of do_transfer, based on
unexpected product transfers (or exceeding quantities) found in the pack operations.
'''
move_obj = self.pool.get('stock.move')
operation_obj = self.pool.get('stock.pack.operation')
moves = []
for op in picking.pack_operation_ids:
for product_id, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items():
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0:
vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context)
moves.append(move_obj.create(cr, uid, vals, context=context))
if moves:
move_obj.action_confirm(cr, uid, moves, context=context)
return moves
def rereserve_pick(self, cr, uid, ids, context=None):
"""
This can be used to provide a button that rereserves taking into account the existing pack operations
"""
for pick in self.browse(cr, uid, ids, context=context):
self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines], context=context)
def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None):
""" Unreserve quants then try to reassign quants."""
stock_move_obj = self.pool.get('stock.move')
if not move_ids:
self.do_unreserve(cr, uid, [picking.id], context=context)
self.action_assign(cr, uid, [picking.id], context=context)
else:
stock_move_obj.do_unreserve(cr, uid, move_ids, context=context)
stock_move_obj.action_assign(cr, uid, move_ids, context=context)
@api.cr_uid_ids_context
def do_enter_transfer_details(self, cr, uid, picking, context=None):
if not context:
context = {}
context.update({
'active_model': self._name,
'active_ids': picking,
'active_id': len(picking) and picking[0] or False
})
created_id = self.pool['stock.transfer_details'].create(cr, uid, {'picking_id': len(picking) and picking[0] or False}, context)
return self.pool['stock.transfer_details'].wizard_view(cr, uid, created_id, context)
@api.cr_uid_ids_context
def do_transfer(self, cr, uid, picking_ids, context=None):
"""
If no pack operation, we do simple action_done of the picking
Otherwise, do the pack operations
"""
if not context:
context = {}
stock_move_obj = self.pool.get('stock.move')
for picking in self.browse(cr, uid, picking_ids, context=context):
if not picking.pack_operation_ids:
self.action_done(cr, uid, [picking.id], context=context)
continue
else:
need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context)
#create extra moves in the picking (unexpected product moves coming from pack operations)
todo_move_ids = []
if not all_op_processed:
todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context)
#split move lines if needed
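            #fully processed moves (remaining_qty == 0) are done right away; partially processed ones are split
            #so the processed part can be done and the remainder re-assigned; draft moves are (re)assigned as well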
toassign_move_ids = []
for move in picking.move_lines:
remaining_qty = move.remaining_qty
if move.state in ('done', 'cancel'):
#ignore stock moves cancelled or already done
continue
elif move.state == 'draft':
toassign_move_ids.append(move.id)
if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0:
if move.state in ('draft', 'assigned', 'confirmed'):
todo_move_ids.append(move.id)
elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \
float_compare(remaining_qty, move.product_qty, precision_rounding = move.product_id.uom_id.rounding) < 0:
new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=context)
todo_move_ids.append(move.id)
#Assign move as it was assigned before
toassign_move_ids.append(new_move)
if need_rereserve or not all_op_processed:
if not picking.location_id.usage in ("supplier", "production", "inventory"):
self.rereserve_quants(cr, uid, picking, move_ids=todo_move_ids, context=context)
self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context)
if todo_move_ids and not context.get('do_only_split'):
self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context)
elif context.get('do_only_split'):
context = dict(context, split=todo_move_ids)
self._create_backorder(cr, uid, picking, context=context)
if toassign_move_ids:
stock_move_obj.action_assign(cr, uid, toassign_move_ids, context=context)
return True
@api.cr_uid_ids_context
def do_split(self, cr, uid, picking_ids, context=None):
""" just split the picking (create a backorder) without making it 'done' """
if context is None:
context = {}
ctx = context.copy()
ctx['do_only_split'] = True
return self.do_transfer(cr, uid, picking_ids, context=ctx)
def get_next_picking_for_ui(self, cr, uid, context=None):
""" returns the next pickings to process. Used in the barcode scanner UI"""
if context is None:
context = {}
domain = [('state', 'in', ('assigned', 'partially_available'))]
if context.get('default_picking_type_id'):
domain.append(('picking_type_id', '=', context['default_picking_type_id']))
return self.search(cr, uid, domain, context=context)
def action_done_from_ui(self, cr, uid, picking_id, context=None):
""" called when button 'done' is pushed in the barcode scanner UI """
#write qty_done into field product_qty for every package_operation before doing the transfer
pack_op_obj = self.pool.get('stock.pack.operation')
for operation in self.browse(cr, uid, picking_id, context=context).pack_operation_ids:
pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context)
self.do_transfer(cr, uid, [picking_id], context=context)
#return id of next picking to work on
return self.get_next_picking_for_ui(cr, uid, context=context)
@api.cr_uid_ids_context
def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None):
""" Create a package with the current pack_operation_ids of the picking that aren't yet in a pack.
Used in the barcode scanner UI and the normal interface as well.
        operation_filter_ids is used by the barcode scanner interface to specify a subset of operations to pack"""
        if operation_filter_ids is None:
operation_filter_ids = []
stock_operation_obj = self.pool.get('stock.pack.operation')
package_obj = self.pool.get('stock.quant.package')
stock_move_obj = self.pool.get('stock.move')
package_id = False
for picking_id in picking_ids:
operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)]
if operation_filter_ids != []:
operation_search_domain.append(('id', 'in', operation_filter_ids))
operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context)
pack_operation_ids = []
if operation_ids:
for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context):
#If we haven't done all qty in operation, we have to split into 2 operation
op = operation
if (operation.qty_done < operation.product_qty):
new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context)
stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0, 'lot_id': False}, context=context)
op = stock_operation_obj.browse(cr, uid, new_operation, context=context)
pack_operation_ids.append(op.id)
if op.product_id and op.location_id and op.location_dest_id:
stock_move_obj.check_tracking_product(cr, uid, op.product_id, op.lot_id.id, op.location_id, op.location_dest_id, context=context)
package_id = package_obj.create(cr, uid, {}, context=context)
stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context)
return package_id
def process_product_id_from_ui(self, cr, uid, picking_id, product_id, op_id, increment=True, context=None):
return self.pool.get('stock.pack.operation')._search_and_increment(cr, uid, picking_id, [('product_id', '=', product_id),('id', '=', op_id)], increment=increment, context=context)
def process_barcode_from_ui(self, cr, uid, picking_id, barcode_str, visible_op_ids, context=None):
        '''This function is called each time the barcode scanner reads an input'''
lot_obj = self.pool.get('stock.production.lot')
package_obj = self.pool.get('stock.quant.package')
product_obj = self.pool.get('product.product')
stock_operation_obj = self.pool.get('stock.pack.operation')
stock_location_obj = self.pool.get('stock.location')
answer = {'filter_loc': False, 'operation_id': False}
        #check if the barcode corresponds to a location
matching_location_ids = stock_location_obj.search(cr, uid, [('loc_barcode', '=', barcode_str)], context=context)
if matching_location_ids:
            #if we have a location, return immediately with the location name
location = stock_location_obj.browse(cr, uid, matching_location_ids[0], context=None)
answer['filter_loc'] = stock_location_obj._name_get(cr, uid, location, context=None)
answer['filter_loc_id'] = matching_location_ids[0]
return answer
        #check if the barcode corresponds to a product
matching_product_ids = product_obj.search(cr, uid, ['|', ('ean13', '=', barcode_str), ('default_code', '=', barcode_str)], context=context)
if matching_product_ids:
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', matching_product_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
        #check if the barcode corresponds to a lot
matching_lot_ids = lot_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
if matching_lot_ids:
lot = lot_obj.browse(cr, uid, matching_lot_ids[0], context=context)
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', lot.product_id.id), ('lot_id', '=', lot.id)], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
        #check if the barcode corresponds to a package
matching_package_ids = package_obj.search(cr, uid, [('name', '=', barcode_str)], context=context)
if matching_package_ids:
op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('package_id', '=', matching_package_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context)
answer['operation_id'] = op_id
return answer
return answer
class stock_production_lot(osv.osv):
_name = 'stock.production.lot'
_inherit = ['mail.thread']
_description = 'Lot/Serial'
_columns = {
'name': fields.char('Serial Number', required=True, help="Unique Serial Number"),
'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True),
'create_date': fields.datetime('Creation Date'),
}
_defaults = {
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
        ('name_ref_uniq', 'unique (name, ref, product_id)', 'The combination of serial number, internal reference and product must be unique!'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of lots
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
quant_obj = self.pool.get("stock.quant")
quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context)
moves = set()
for quant in quant_obj.browse(cr, uid, quants, context=context):
moves |= {move.id for move in quant.history_ids}
if moves:
return {
'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]",
'name': _('Traceability'),
'view_mode': 'tree,form',
'view_type': 'form',
'context': {'tree_view_ref': 'stock.view_move_tree'},
'res_model': 'stock.move',
'type': 'ir.actions.act_window',
}
return False
# ----------------------------------------------------
# Move
# ----------------------------------------------------
class stock_move(osv.osv):
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def get_price_unit(self, cr, uid, move, context=None):
""" Returns the unit price to store on the quant """
return move.price_unit or move.product_id.standard_price
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name + ' > ' + line.location_dest_id.name
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _quantity_normalize(self, cr, uid, ids, name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
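        #convert product_uom_qty (expressed in the line's UoM) into the product's default UoM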
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context)
return res
def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None):
uom_obj = self.pool.get('product.uom')
res = {}
for move in self.browse(cr, uid, ids, context=context):
qty = move.product_qty
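            #the remaining quantity is the move quantity minus everything already linked to pack operations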
for record in move.linked_move_operation_ids:
qty -= record.qty
# Keeping in product default UoM
res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding)
return res
def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id]
else:
res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id]
return res
def _get_product_availability(self, cr, uid, ids, field_name, args, context=None):
quant_obj = self.pool.get('stock.quant')
res = dict.fromkeys(ids, False)
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
res[move.id] = move.product_qty
else:
sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context)
quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context)
availability = 0
for quant in quant_obj.browse(cr, uid, quant_ids, context=context):
availability += quant.qty
res[move.id] = min(move.product_qty, availability)
return res
def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None):
settings_obj = self.pool.get('stock.config.settings')
uom_obj = self.pool.get('product.uom')
res = dict.fromkeys(ids, '')
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal':
res[move.id] = '' # 'not applicable' or 'n/a' could work too
continue
total_available = min(move.product_qty, move.reserved_availability + move.availability)
total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, context=context)
info = str(total_available)
#look in the settings if we need to display the UoM name or not
config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
if config_ids:
stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
if stock_settings.group_uom:
info += ' ' + move.product_uom.name
if move.reserved_availability:
if move.reserved_availability != total_available:
#some of the available quantity is assigned and some are available but not reserved
reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, context=context)
info += _(' (%s reserved)') % str(reserved_available)
else:
#all available quantity is assigned
info += _(' (reserved)')
res[move.id] = info
return res
def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, 0)
for move in self.browse(cr, uid, ids, context=context):
res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids])
return res
def _get_move(self, cr, uid, ids, context=None):
res = set()
for quant in self.browse(cr, uid, ids, context=context):
if quant.reservation_id:
res.add(quant.reservation_id.id)
return list(res)
def _get_move_ids(self, cr, uid, ids, context=None):
res = []
for picking in self.browse(cr, uid, ids, context=context):
res += [x.id for x in picking.move_lines]
return res
def _get_moves_from_prod(self, cr, uid, ids, context=None):
if ids:
return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context)
return []
def _set_product_qty(self, cr, uid, id, field, value, arg, context=None):
""" The meaning of product_qty field changed lately and is now a functional field computing the quantity
in the default product UoM. This code has been added to raise an error if a write is made given a value
for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to
detect errors.
"""
raise osv.except_osv(_('Programming Error!'), _('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.'))
_columns = {
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', '<>', 'service')], states={'done': [('readonly', True)]}),
'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={
_name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10),
}, string='Quantity',
help='Quantity in the default UoM of the product'),
'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True, states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'),
        'product_packaging': fields.many2one('product.packaging', 'Preferred Packaging', help="It specifies attributes of packaging like type, quantity of packaging, etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True,
states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True,
auto_join=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False),
'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True, states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True, copy=False,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False),
'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful in case of debug", copy=False),
'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.char("Source"),
'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True,
help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True),
'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False),
'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0,
states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"),
'procurement_id': fields.many2one('procurement.order', 'Procurement'),
'group_id': fields.many2one('procurement.group', 'Procurement Group'),
'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The pull rule that created this stock move'),
'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'),
'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'),
'inventory_id': fields.many2one('stock.inventory', 'Inventory'),
'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'),
'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False),
'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'),
'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'),
'availability': fields.function(_get_product_availability, type='float', string='Quantity Available', readonly=True, help='Quantity in stock that can still be reserved for this move'),
'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'),
'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"),
'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"),
'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."),
}
def _default_location_destination(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_type_id', False):
pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
return pick_type.default_location_dest_id and pick_type.default_location_dest_id.id or False
return False
def _default_location_source(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_type_id', False):
pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context)
return pick_type.default_location_src_id and pick_type.default_location_src_id.id or False
return False
def _default_destination_address(self, cr, uid, context=None):
return False
def _default_group_id(self, cr, uid, context=None):
context = context or {}
if context.get('default_picking_id', False):
picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context)
return picking.group_id.id
return False
_defaults = {
'location_id': _default_location_source,
'location_dest_id': _default_location_destination,
'partner_id': _default_destination_address,
'state': 'draft',
'priority': '1',
'product_uom_qty': 1.0,
'scrapped': False,
'date': fields.datetime.now,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': fields.datetime.now,
'procure_method': 'make_to_stock',
'propagate': True,
'partially_available': False,
'group_id': _default_group_id,
}
def _check_uom(self, cr, uid, ids, context=None):
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id:
return False
return True
_constraints = [
(_check_uom,
         'You are trying to move a product using a UoM that is not compatible with the UoM of the product moved. Please use a UoM in the same UoM category.',
['product_uom']),
]
def init(self, cr):
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)')
@api.cr_uid_ids_context
def do_unreserve(self, cr, uid, move_ids, context=None):
quant_obj = self.pool.get("stock.quant")
for move in self.browse(cr, uid, move_ids, context=context):
if move.state in ('done', 'cancel'):
raise osv.except_osv(_('Operation Forbidden!'), _('Cannot unreserve a done move'))
quant_obj.quants_unreserve(cr, uid, move, context=context)
if self.find_move_ancestors(cr, uid, move, context=context):
self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context)
else:
self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context)
def _prepare_procurement_from_move(self, cr, uid, move, context=None):
origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/")
group_id = move.group_id and move.group_id.id or False
if move.rule_id:
if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id:
group_id = move.rule_id.group_id.id
elif move.rule_id.group_propagation_option == 'none':
group_id = False
return {
'name': move.rule_id and move.rule_id.name or "/",
'origin': origin,
'company_id': move.company_id and move.company_id.id or False,
'date_planned': move.date,
'product_id': move.product_id.id,
'product_qty': move.product_uom_qty,
'product_uom': move.product_uom.id,
'product_uos_qty': (move.product_uos and move.product_uos_qty) or move.product_uom_qty,
'product_uos': (move.product_uos and move.product_uos.id) or move.product_uom.id,
'location_id': move.location_id.id,
'move_dest_id': move.id,
'group_id': group_id,
'route_ids': [(4, x.id) for x in move.route_ids],
'warehouse_id': move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False),
'priority': move.priority,
}
def _push_apply(self, cr, uid, moves, context=None):
push_obj = self.pool.get("stock.location.path")
for move in moves:
#1) if the move is already chained, there is no need to check push rules
#2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way
# to receive goods without triggering the push rules again (which would duplicate chained operations)
if not move.move_dest_id and not move.origin_returned_move_id:
domain = [('location_from_id', '=', move.location_dest_id.id)]
#priority goes to the route defined on the product and product category
route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids]
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#then we search on the warehouse if a rule can apply
wh_route_ids = []
if move.warehouse_id:
wh_route_ids = [x.id for x in move.warehouse_id.route_ids]
elif move.picking_type_id and move.picking_type_id.warehouse_id:
wh_route_ids = [x.id for x in move.picking_type_id.warehouse_id.route_ids]
if wh_route_ids:
rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context)
if not rules:
#if no specialized push rule has been found yet, we try to find a general one (without route)
rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
if rules:
rule = push_obj.browse(cr, uid, rules[0], context=context)
push_obj._apply(cr, uid, rule, move, context=context)
return True
def _create_procurement(self, cr, uid, move, context=None):
""" This will create a procurement order """
return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context), context=context)
def _create_procurements(self, cr, uid, moves, context=None):
res = []
for move in moves:
res.append(self._create_procurement(cr, uid, move, context=context))
return res
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
# Check that we do not modify a stock.move which is done
frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
if frozen_fields.intersection(vals):
raise osv.except_osv(_('Operation Forbidden!'),
_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
propagated_changes_dict = {}
#propagation of quantity change
if vals.get('product_uom_qty'):
propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty']
if vals.get('product_uom_id'):
propagated_changes_dict['product_uom_id'] = vals['product_uom_id']
#propagation of expected date:
propagated_date_field = False
if vals.get('date_expected'):
#propagate any manual change of the expected date
propagated_date_field = 'date_expected'
elif (vals.get('state', '') == 'done' and vals.get('date')):
#propagate also any delta observed when setting the move as done
propagated_date_field = 'date'
if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict):
#any propagation is (maybe) needed
for move in self.browse(cr, uid, ids, context=context):
if move.move_dest_id and move.propagate:
if 'date_expected' in propagated_changes_dict:
propagated_changes_dict.pop('date_expected')
if propagated_date_field:
current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT)
delta = new_date - current_date
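                        #only propagate the date change if the delta exceeds the company's minimum propagation threshold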
if abs(delta.days) >= move.company_id.propagation_minimum_delta:
old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT)
new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
propagated_changes_dict['date_expected'] = new_move_date
#For pushed moves as well as for pulled moves, propagate by recursive call of write().
#Note that, for pulled moves we intentionally don't propagate on the procurement.
if propagated_changes_dict:
self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context)
return super(stock_move, self).write(cr, uid, ids, vals, context=context)
def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
warning = {}
if (not product_id) or (product_qty <= 0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: Odoo will not "
"automatically generate a back order.")})
break
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
else:
result['product_uos_qty'] = product_qty
return {'value': result, 'warning': warning}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uom_qty': 0.00
}
if (not product_id) or (product_uos_qty <= 0.0):
result['product_uos_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# No warning if the quantity was decreased to avoid double warnings:
# The clients should call onchange_quantity too anyway
if product_uos and product_uom and (product_uom != product_uos):
result['product_uom_qty'] = product_uos_qty / uos_coeff['uos_coeff']
else:
result['product_uom_qty'] = product_uos_qty
return {'value': result}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, UoS, quantity and UoS quantity.
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {}
user = self.pool.get('res.users').browse(cr, uid, uid)
lang = user and user.lang or False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
uos_id = product.uos_id and product.uos_id.id or False
result = {
'name': product.partner_ref,
'product_uom': product.uom_id.id,
'product_uos': uos_id,
'product_uom_qty': 1.00,
'product_uos_qty': self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
}
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
return {'value': result}
def _prepare_picking_assign(self, cr, uid, move, context=None):
""" Prepares a new picking for this move as it could not be assigned to
another picking. This method is designed to be inherited.
"""
values = {
'origin': move.origin,
'company_id': move.company_id and move.company_id.id or False,
'move_type': move.group_id and move.group_id.move_type or 'direct',
'partner_id': move.partner_id.id or False,
'picking_type_id': move.picking_type_id and move.picking_type_id.id or False,
}
return values
@api.cr_uid_ids_context
def _picking_assign(self, cr, uid, move_ids, procurement_group, location_from, location_to, context=None):
"""Assign a picking on the given move_ids, which is a list of move supposed to share the same procurement_group, location_from and location_to
(and company). Those attributes are also given as parameters.
"""
pick_obj = self.pool.get("stock.picking")
        # Use a SQL query as doing it with the ORM would split it into different queries with id IN (...)
# In the next version, the locations on the picking should be stored again.
query = """
SELECT stock_picking.id FROM stock_picking, stock_move
WHERE
stock_picking.state in ('draft', 'confirmed', 'waiting') AND
stock_move.picking_id = stock_picking.id AND
stock_move.location_id = %s AND
stock_move.location_dest_id = %s AND
"""
params = (location_from, location_to)
if not procurement_group:
query += "stock_picking.group_id IS NULL LIMIT 1"
else:
query += "stock_picking.group_id = %s LIMIT 1"
params += (procurement_group,)
cr.execute(query, params)
[pick] = cr.fetchone() or [None]
if not pick:
move = self.browse(cr, uid, move_ids, context=context)[0]
values = self._prepare_picking_assign(cr, uid, move, context=context)
pick = pick_obj.create(cr, uid, values, context=context)
return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context)
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': {'date': date_expected}}
def attribute_price(self, cr, uid, move, context=None):
"""
Attribute price to move, important in inter-company moves or receipts with only one partner
"""
if not move.price_unit:
price = move.product_id.standard_price
self.write(cr, uid, [move.id], {'price_unit': price})
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move or put it in waiting if it's linked to another move.
@return: List of ids.
"""
if not context:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
states = {
'confirmed': [],
'waiting': []
}
to_assign = {}
for move in self.browse(cr, uid, ids, context=context):
self.attribute_price(cr, uid, move, context=context)
state = 'confirmed'
            #if the move is preceded, then it's waiting (if the preceding move is done, then action_assign has been called already and its state is already available)
if move.move_orig_ids:
state = 'waiting'
            #if the move is split and some of its ancestors were preceded, then it's waiting as well
elif move.split_from:
move2 = move.split_from
while move2 and state != 'waiting':
if move2.move_orig_ids:
state = 'waiting'
move2 = move2.split_from
states[state].append(move.id)
if not move.picking_id and move.picking_type_id:
key = (move.group_id.id, move.location_id.id, move.location_dest_id.id)
if key not in to_assign:
to_assign[key] = []
to_assign[key].append(move.id)
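        #create procurements for 'make_to_order' moves: once created, those moves wait on their procurement instead of reserving from stock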
moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order']
self._create_procurements(cr, uid, moves, context=context)
for move in moves:
states['waiting'].append(move.id)
states['confirmed'].remove(move.id)
for state, write_ids in states.items():
if len(write_ids):
self.write(cr, uid, write_ids, {'state': state})
#assign picking in batch for all confirmed move that share the same details
for key, move_ids in to_assign.items():
procurement_group, location_from, location_to = key
self._picking_assign(cr, uid, move_ids, procurement_group, location_from, location_to, context=context)
moves = self.browse(cr, uid, ids, context=context)
self._push_apply(cr, uid, moves, context=context)
return ids
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
return self.write(cr, uid, ids, {'state': 'assigned'}, context=context)
def check_tracking_product(self, cr, uid, product, lot_id, location, location_dest, context=None):
check = False
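        #a lot/serial number is required for tracked products: always (except towards inventory loss locations),
        #on incoming moves to internal locations, or on outgoing moves from internal locations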
if product.track_all and not location_dest.usage == 'inventory':
check = True
elif product.track_incoming and location.usage in ('supplier', 'transit', 'inventory') and location_dest.usage == 'internal':
check = True
elif product.track_outgoing and location_dest.usage in ('customer', 'transit') and location.usage == 'internal':
check = True
if check and not lot_id:
raise osv.except_osv(_('Warning!'), _('You must assign a serial number for the product %s') % (product.name))
def check_tracking(self, cr, uid, move, lot_id, context=None):
""" Checks if serial number is assigned to stock move or not and raise an error if it had to.
"""
self.check_tracking_product(cr, uid, move.product_id, lot_id, move.location_id, move.location_dest_id, context=context)
def action_assign(self, cr, uid, ids, context=None):
""" Checks the product type and accordingly writes the state.
"""
context = context or {}
quant_obj = self.pool.get("stock.quant")
to_assign_moves = []
main_domain = {}
todo_moves = []
operations = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('confirmed', 'waiting', 'assigned'):
continue
if move.location_id.usage in ('supplier', 'inventory', 'production'):
to_assign_moves.append(move.id)
#in case the move is returned, we want to try to find quants before forcing the assignment
if not move.origin_returned_move_id:
continue
if move.product_id.type == 'consu':
to_assign_moves.append(move.id)
continue
else:
todo_moves.append(move)
#we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only
main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)]
                #if the move is preceded, restrict the choice of quants to the ones moved previously in the original move
ancestors = self.find_move_ancestors(cr, uid, move, context=context)
if move.state == 'waiting' and not ancestors:
                    #if the waiting move doesn't have any ancestor yet (PO/MO not confirmed yet), don't find any quant available in stock
main_domain[move.id] += [('id', '=', False)]
elif ancestors:
main_domain[move.id] += [('history_ids', 'in', ancestors)]
#if the move is returned from another, restrict the choice of quants to the ones that follow the returned move
if move.origin_returned_move_id:
main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)]
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
        # Check all ops and sort them: we want to process the packages first, then the operations with a lot, then the rest
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
for ops in operations:
#first try to find quants based on specific domains given by linked operations
for record in ops.linked_move_operation_ids:
move = record.move_id
if move.id in main_domain:
domain = main_domain[move.id] + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
qty = record.qty
if qty:
quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, qty, domain=domain, prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, record, context=context)
for move in todo_moves:
if move.linked_move_operation_ids:
continue
#then if the move isn't totally assigned, try to find quants without any specific domain
if move.state != 'assigned':
qty_already_assigned = move.reserved_availability
qty = move.product_qty - qty_already_assigned
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain[move.id], prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
        #force assignment of consumable products and of moves incoming from supplier/inventory/production
if to_assign_moves:
self.force_assign(cr, uid, to_assign_moves, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
procurement_obj = self.pool.get('procurement.order')
context = context or {}
procs_to_check = []
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
raise osv.except_osv(_('Operation Forbidden!'),
_('You cannot cancel a stock move that has been set to \'Done\'.'))
if move.reserved_quant_ids:
self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context)
if context.get('cancel_procurement'):
if move.propagate:
procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context)
procurement_obj.cancel(cr, uid, procurement_ids, context=context)
else:
if move.move_dest_id:
if move.propagate:
self.action_cancel(cr, uid, [move.move_dest_id.id], context=context)
elif move.move_dest_id.state == 'waiting':
#If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead)
self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
if move.procurement_id:
# Does the same as procurement check, only eliminating a refresh
procs_to_check.append(move.procurement_id.id)
res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
if procs_to_check:
procurement_obj.check(cr, uid, procs_to_check, context=context)
return res
def _check_package_from_moves(self, cr, uid, ids, context=None):
pack_obj = self.pool.get("stock.quant.package")
packs = set()
for move in self.browse(cr, uid, ids, context=context):
packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0])
return pack_obj._check_location_constraint(cr, uid, list(packs), context=context)
def find_move_ancestors(self, cr, uid, move, context=None):
'''Find the first level ancestors of given move '''
ancestors = []
move2 = move
while move2:
ancestors += [x.id for x in move2.move_orig_ids]
#loop on the split_from to find the ancestor of split moves only if the move has not direct ancestor (priority goes to them)
move2 = not move2.move_orig_ids and move2.split_from or False
return ancestors
@api.cr_uid_ids_context
def recalculate_move_state(self, cr, uid, move_ids, context=None):
        '''Recompute the state of the given moves because their reserved quants were used to fulfill another operation'''
for move in self.browse(cr, uid, move_ids, context=context):
vals = {}
reserved_quant_ids = move.reserved_quant_ids
if len(reserved_quant_ids) > 0 and not move.partially_available:
vals['partially_available'] = True
if len(reserved_quant_ids) == 0 and move.partially_available:
vals['partially_available'] = False
if move.state == 'assigned':
if self.find_move_ancestors(cr, uid, move, context=context):
vals['state'] = 'waiting'
else:
vals['state'] = 'confirmed'
if vals:
self.write(cr, uid, [move.id], vals, context=context)
def action_done(self, cr, uid, ids, context=None):
""" Process completely the moves given as ids and if all moves are done, it will finish the picking.
"""
context = context or {}
picking_obj = self.pool.get("stock.picking")
quant_obj = self.pool.get("stock.quant")
todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"]
if todo:
ids = self.action_confirm(cr, uid, todo, context=context)
pickings = set()
procurement_ids = set()
#Search operations that are linked to the moves
operations = set()
move_qty = {}
for move in self.browse(cr, uid, ids, context=context):
move_qty[move.id] = move.product_qty
for link in move.linked_move_operation_ids:
operations.add(link.operation_id)
#Sort operations according to entire packages first, then package + lot, package only, lot only
operations = list(operations)
operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0))
for ops in operations:
if ops.picking_id:
pickings.add(ops.picking_id.id)
main_domain = [('qty', '>', 0)]
for record in ops.linked_move_operation_ids:
move = record.move_id
self.check_tracking(cr, uid, move, not ops.product_id and ops.package_id.id or ops.lot_id.id, context=context)
prefered_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
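                #quant preference order: quants reserved for this move first, then free quants, then quants reserved for other moves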
dom = main_domain + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context)
quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, record.qty, domain=dom, prefered_domain_list=prefered_domain_list,
restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
if ops.product_id:
#If a product is given, the result is always put immediately in the result package (if it is False, they are without package)
quant_dest_package_id = ops.result_package_id.id
ctx = context
else:
# When a pack is moved entirely, the quants should not get any destination package written on them individually
quant_dest_package_id = False
ctx = context.copy()
ctx['entire_pack'] = True
quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id, lot_id=ops.lot_id.id, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, context=ctx)
# Handle pack in pack
if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id:
self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context)
if not move_qty.get(move.id):
raise osv.except_osv(_("Error"), _("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. ") % (move.product_uom.name, move.product_id.uom_id.name))
move_qty[move.id] -= record.qty
#Check for remaining qtys and unreserve/check move_dest_id in case of no pack operations
move_dest_ids = set()
for move in self.browse(cr, uid, ids, context=context):
move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding)
if move_qty_cmp > 0: # (=In case no pack operations in picking)
main_domain = [('qty', '>', 0)]
prefered_domain = [('reservation_id', '=', move.id)]
fallback_domain = [('reservation_id', '=', False)]
fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]
prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2]
self.check_tracking(cr, uid, move, move.restrict_lot_id.id, context=context)
qty = move_qty[move.id]
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_move(cr, uid, quants, move, move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context)
# If the move has a destination, add it to the list to reserve
if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):
move_dest_ids.add(move.move_dest_id.id)
if move.procurement_id:
procurement_ids.add(move.procurement_id.id)
#unreserve the quants and make them available for other operations/moves
quant_obj.quants_unreserve(cr, uid, move, context=context)
# Check the packages have been placed in the correct locations
self._check_package_from_moves(cr, uid, ids, context=context)
#set the move as done
self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
self.pool.get('procurement.order').check(cr, uid, list(procurement_ids), context=context)
#assign destination moves
if move_dest_ids:
self.action_assign(cr, uid, list(move_dest_ids), context=context)
#check picking state to set the date_done is needed
done_picking = []
for picking in picking_obj.browse(cr, uid, list(pickings), context=context):
if picking.state == 'done' and not picking.date_done:
done_picking.append(picking.id)
if done_picking:
picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
context = context or {}
for move in self.browse(cr, uid, ids, context=context):
if move.state not in ('draft', 'cancel'):
raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
return super(stock_move, self).unlink(cr, uid, ids, context=context)
def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
@return: Scraped lines
"""
quant_obj = self.pool.get("stock.quant")
#quantity should be given in MOVE UOM
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
#Previously used to prevent scrapping from a virtual location, but not necessary anymore
#if source_location.usage != 'internal':
#restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
#raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
default_val = {
'location_id': source_location.id,
'product_uom_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
# We "flag" the quant from which we want to scrap the products. To do so:
# - we select the quants related to the move we scrap from
# - we reserve the quants with the scrapped move
# See self.action_done, and particularly how the "prefered_domain" is defined, for clarification
scrap_move = self.browse(cr, uid, new_move, context=context)
if move.state == 'done' and scrap_move.location_id.usage not in ('supplier', 'inventory', 'production'):
domain = [('qty', '>', 0), ('history_ids', 'in', [move.id])]
# We use scrap_move data since a reservation makes sense for a move not already done
quants = quant_obj.quants_get_prefered_domain(cr, uid, scrap_move.location_id,
scrap_move.product_id, quantity, domain=domain, prefered_domain_list=[],
restrict_lot_id=scrap_move.restrict_lot_id.id, restrict_partner_id=scrap_move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, scrap_move, context=context)
self.action_done(cr, uid, res, context=context)
return res
def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None):
""" Splits qty from move move into a new move
:param move: browse record
:param qty: float. quantity to split (given in product UoM)
:param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot.
:param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner.
:param context: dictionary. It can contain the special key 'source_location_id' in order to force the source location when copying the move
returns the ID of the backorder move created
"""
if move.state in ('done', 'cancel'):
raise osv.except_osv(_('Error'), _('You cannot split a move that is already done.'))
if move.state == 'draft':
#we restrict splitting a draft move because, if it is not confirmed yet, it may be replaced by several other moves in
#case of a phantom BoM (with the mrp module), and we don't want to deal with that complexity by copying a move whose product will be exploded.
raise osv.except_osv(_('Error'), _('You cannot split a draft move. It needs to be confirmed first.'))
if move.product_qty <= qty or qty == 0:
return move.id
uom_obj = self.pool.get('product.uom')
context = context or {}
#HALF-UP rounding, as the only rounding errors will come from the propagation of error from the default UoM
uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, rounding_method='HALF-UP', context=context)
uos_qty = uom_qty * move.product_uos_qty / move.product_uom_qty
defaults = {
'product_uom_qty': uom_qty,
'product_uos_qty': uos_qty,
'procure_method': 'make_to_stock',
'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
'split_from': move.id,
'procurement_id': move.procurement_id.id,
'move_dest_id': move.move_dest_id.id,
'origin_returned_move_id': move.origin_returned_move_id.id,
}
if context.get('source_location_id'):
defaults['location_id'] = context['source_location_id']
new_move = self.copy(cr, uid, move.id, defaults, context=context)
ctx = context.copy()
ctx['do_not_propagate'] = True
self.write(cr, uid, [move.id], {
'product_uom_qty': move.product_uom_qty - uom_qty,
'product_uos_qty': move.product_uos_qty - uos_qty,
}, context=ctx)
if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'):
new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context)
self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context)
#returning the first element of the list returned by action_confirm is ok because we checked it wouldn't be exploded (and
#thus the result of action_confirm should always be a list of length 1)
return self.action_confirm(cr, uid, [new_move], context=context)[0]
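# Illustrative usage (values assumed, move UoM assumed equal to the product UoM):
# splitting 2 units off a confirmed move of 10 units
#   backorder_id = self.split(cr, uid, move, 2.0, context=context)
# leaves the original move with product_uom_qty == 8.0 and returns the id of the
# new, confirmed move carrying the 2.0 split-off units; if qty is 0 or covers the
# whole move, the original move id is returned unchanged.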
def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None):
"""
Returns the code the picking type should have. This can easily be used
to check if a move is internal or not
move, location_id and location_dest_id are browse records
"""
code = 'internal'
src_loc = location_id or move.location_id
dest_loc = location_dest_id or move.location_dest_id
if src_loc.usage == 'internal' and dest_loc.usage != 'internal':
code = 'outgoing'
if src_loc.usage != 'internal' and dest_loc.usage == 'internal':
code = 'incoming'
return code
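# Examples (illustrative): supplier -> internal gives 'incoming',
# internal -> customer gives 'outgoing', internal -> internal stays 'internal'.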
def _get_taxes(self, cr, uid, move, context=None):
return []
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = False
if inv.move_ids:
res[inv.id] = True
return res
def _get_available_filters(self, cr, uid, context=None):
"""
This function will return the list of filters allowed according to the options checked
in 'Settings\Warehouse'.
:rtype: list of tuple
"""
#default available choices
res_filter = [('none', _('All products')), ('partial', _('Manual Selection of Products')), ('product', _('One product only'))]
settings_obj = self.pool.get('stock.config.settings')
config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context)
#If the configuration has never been updated, all fields are False by default and so should not be displayed
if not config_ids:
return res_filter
stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context)
if stock_settings.group_stock_tracking_owner:
res_filter.append(('owner', _('One owner only')))
res_filter.append(('product_owner', _('One product for a specific owner')))
if stock_settings.group_stock_tracking_lot:
res_filter.append(('lot', _('One Lot/Serial Number')))
if stock_settings.group_stock_packaging:
res_filter.append(('pack', _('A Pack')))
return res_filter
def _get_total_qty(self, cr, uid, ids, field_name, args, context=None):
res = {}
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = sum([x.product_qty for x in inv.line_ids])
return res
INVENTORY_STATE_SELECTION = [
('draft', 'Draft'),
('cancel', 'Cancelled'),
('confirm', 'In Progress'),
('done', 'Validated'),
]
_columns = {
'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."),
'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."),
'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True),
'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}),
'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', readonly=True, select=True, copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}),
'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."),
'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."),
'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."),
'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False),
'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string=' Stock Move Exists?', help='technical field for attrs in view'),
'filter': fields.selection(_get_available_filters, 'Inventory of', required=True,
help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products "\
"(e.g. Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the "\
"system propose for a single product / lot /... "),
'total_qty': fields.function(_get_total_qty, type="float"),
}
def _default_stock_location(self, cr, uid, context=None):
try:
warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0')
return warehouse.lot_stock_id.id
except:
return False
_defaults = {
'date': fields.datetime.now,
'state': 'draft',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'location_id': _default_stock_location,
'filter': 'none',
}
def reset_real_qty(self, cr, uid, ids, context=None):
inventory = self.browse(cr, uid, ids[0], context=context)
line_ids = [line.id for line in inventory.line_ids]
self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0})
return True
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
for inventory_line in inv.line_ids:
if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty:
raise osv.except_osv(_('Warning'), _('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') % (inventory_line.product_id.name, inventory_line.product_qty))
self.action_check(cr, uid, [inv.id], context=context)
self.write(cr, uid, [inv.id], {'state': 'done'}, context=context)
self.post_inventory(cr, uid, inv, context=context)
return True
def post_inventory(self, cr, uid, inv, context=None):
#The inventory is posted as a single step which means quants cannot be moved from an internal location to another using an inventory
#as they will be moved to the inventory loss location, and other quants will be created in the encoded quant location. This is normal behavior
#as quants cannot be reused from the inventory location (users can still manually move the products before/after the inventory if they want).
move_obj = self.pool.get('stock.move')
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids if x.state != 'done'], context=context)
def action_check(self, cr, uid, ids, context=None):
""" Checks the inventory and computes the stock move to do
@return: True
"""
inventory_line_obj = self.pool.get('stock.inventory.line')
stock_move_obj = self.pool.get('stock.move')
for inventory in self.browse(cr, uid, ids, context=context):
#first remove the existing stock moves linked to this inventory
move_ids = [move.id for move in inventory.move_ids]
stock_move_obj.unlink(cr, uid, move_ids, context=context)
for line in inventory.line_ids:
#compare the checked quantities on inventory lines to the theoretical ones
stock_move = inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context)
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [inv.id], {'line_ids': [(5,)]}, context=context)
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
self.action_cancel_draft(cr, uid, ids, context=context)
def prepare_inventory(self, cr, uid, ids, context=None):
inventory_line_obj = self.pool.get('stock.inventory.line')
for inventory in self.browse(cr, uid, ids, context=context):
# If there are inventory lines already (e.g. from import), respect those and set their theoretical qty
line_ids = [line.id for line in inventory.line_ids]
if not line_ids and inventory.filter != 'partial':
#compute the inventory lines and create them
vals = self._get_inventory_lines(cr, uid, inventory, context=context)
for product_line in vals:
inventory_line_obj.create(cr, uid, product_line, context=context)
return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
def _get_inventory_lines(self, cr, uid, inventory, context=None):
location_obj = self.pool.get('stock.location')
product_obj = self.pool.get('product.product')
location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context)
domain = ' location_id in %s'
args = (tuple(location_ids),)
if inventory.partner_id:
domain += ' and owner_id = %s'
args += (inventory.partner_id.id,)
if inventory.lot_id:
domain += ' and lot_id = %s'
args += (inventory.lot_id.id,)
if inventory.product_id:
domain += ' and product_id = %s'
args += (inventory.product_id.id,)
if inventory.package_id:
domain += ' and package_id = %s'
args += (inventory.package_id.id,)
cr.execute('''
SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id
FROM stock_quant WHERE''' + domain + '''
GROUP BY product_id, location_id, lot_id, package_id, partner_id
''', args)
vals = []
for product_line in cr.dictfetchall():
#replace the None values in the dictionary by False, because falsy values are tested later on
for key, value in product_line.items():
if not value:
product_line[key] = False
product_line['inventory_id'] = inventory.id
product_line['theoretical_qty'] = product_line['product_qty']
if product_line['product_id']:
product = product_obj.browse(cr, uid, product_line['product_id'], context=context)
product_line['product_uom_id'] = product.uom_id.id
vals.append(product_line)
return vals
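# Each returned dict can be fed to stock.inventory.line.create(); an illustrative
# (assumed) entry looks like:
#   {'product_id': 42, 'product_qty': 5.0, 'location_id': 12, 'prod_lot_id': False,
#    'package_id': False, 'partner_id': False, 'inventory_id': inventory.id,
#    'theoretical_qty': 5.0, 'product_uom_id': 1}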
def _check_filter_product(self, cr, uid, ids, context=None):
for inventory in self.browse(cr, uid, ids, context=context):
if inventory.filter == 'none' and inventory.product_id and inventory.location_id and inventory.lot_id:
return True
if inventory.filter not in ('product', 'product_owner') and inventory.product_id:
return False
if inventory.filter != 'lot' and inventory.lot_id:
return False
if inventory.filter not in ('owner', 'product_owner') and inventory.partner_id:
return False
if inventory.filter != 'pack' and inventory.package_id:
return False
return True
def onchange_filter(self, cr, uid, ids, filter, context=None):
to_clean = { 'value': {} }
if filter not in ('product', 'product_owner'):
to_clean['value']['product_id'] = False
if filter != 'lot':
to_clean['value']['lot_id'] = False
if filter not in ('owner', 'product_owner'):
to_clean['value']['partner_id'] = False
if filter != 'pack':
to_clean['value']['package_id'] = False
return to_clean
_constraints = [
(_check_filter_product, 'The selected inventory options are not coherent.',
['filter', 'product_id', 'lot_id', 'partner_id', 'package_id']),
]
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_order = "inventory_id, location_name, product_code, product_name, prodlot_name"
def _get_product_name_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context)
def _get_location_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context)
def _get_prodlot_change(self, cr, uid, ids, context=None):
return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context)
def _get_theoretical_qty(self, cr, uid, ids, name, args, context=None):
res = {}
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
for line in self.browse(cr, uid, ids, context=context):
quant_ids = self._get_quants(cr, uid, line, context=context)
quants = quant_obj.browse(cr, uid, quant_ids, context=context)
tot_qty = sum([x.qty for x in quants])
if line.product_uom_id and line.product_id.uom_id.id != line.product_uom_id.id:
tot_qty = uom_obj._compute_qty_obj(cr, uid, line.product_id.uom_id, tot_qty, line.product_uom_id, context=context)
res[line.id] = tot_qty
return res
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True, select=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'package_id': fields.many2one('stock.quant.package', 'Pack', select=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True),
'theoretical_qty': fields.function(_get_theoretical_qty, type='float', digits_compute=dp.get_precision('Product Unit of Measure'),
store={'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id', 'product_id', 'package_id', 'product_uom_id', 'company_id', 'prod_lot_id', 'partner_id'], 20),},
readonly=True, string="Theoretical Quantity"),
'partner_id': fields.many2one('res.partner', 'Owner'),
'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={
'product.product': (_get_product_name_change, ['name', 'default_code'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}),
'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={
'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}),
'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={
'stock.production.lot': (_get_prodlot_change, ['name'], 20),
'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}),
}
_defaults = {
'product_qty': 0,
'product_uom_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].get_object_reference(cr, uid, 'product', 'product_uom_unit')[1]
}
def _get_quants(self, cr, uid, line, context=None):
quant_obj = self.pool["stock.quant"]
dom = [('company_id', '=', line.company_id.id), ('location_id', '=', line.location_id.id), ('lot_id', '=', line.prod_lot_id.id),
('product_id','=', line.product_id.id), ('owner_id', '=', line.partner_id.id), ('package_id', '=', line.package_id.id)]
quants = quant_obj.search(cr, uid, dom, context=context)
return quants
def onchange_createline(self, cr, uid, ids, location_id=False, product_id=False, uom_id=False, package_id=False, prod_lot_id=False, partner_id=False, company_id=False, context=None):
quant_obj = self.pool["stock.quant"]
uom_obj = self.pool["product.uom"]
res = {'value': {}}
# If no UoM is given yet, put the default UoM of the product
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool['product.uom'].browse(cr, uid, uom_id, context=context)
if product.uom_id.category_id.id != uom.category_id.id:
res['value']['product_uom_id'] = product.uom_id.id
res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]}
uom_id = product.uom_id.id
# Calculate theoretical quantity by searching the quants as in quants_get
if product_id and location_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
if not company_id:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
dom = [('company_id', '=', company_id), ('location_id', '=', location_id), ('lot_id', '=', prod_lot_id),
('product_id','=', product_id), ('owner_id', '=', partner_id), ('package_id', '=', package_id)]
quants = quant_obj.search(cr, uid, dom, context=context)
th_qty = sum([x.qty for x in quant_obj.browse(cr, uid, quants, context=context)])
if product_id and uom_id and product.uom_id.id != uom_id:
th_qty = uom_obj._compute_qty(cr, uid, product.uom_id.id, th_qty, uom_id)
res['value']['theoretical_qty'] = th_qty
res['value']['product_qty'] = th_qty
return res
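# Illustrative result (assumed values) when the chosen UoM matches the product's
# UoM category and 12 units are on hand:
#   {'value': {'theoretical_qty': 12.0, 'product_qty': 12.0}}
# 'product_uom_id' and a 'domain' entry are only added when the UoM category differs.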
def _resolve_inventory_line(self, cr, uid, inventory_line, context=None):
stock_move_obj = self.pool.get('stock.move')
quant_obj = self.pool.get('stock.quant')
diff = inventory_line.theoretical_qty - inventory_line.product_qty
if not diff:
return
#each inventory line where the difference between the theoretical and checked quantities is not 0 requires a stock move to be created
vals = {
'name': _('INV:') + (inventory_line.inventory_id.name or ''),
'product_id': inventory_line.product_id.id,
'product_uom': inventory_line.product_uom_id.id,
'date': inventory_line.inventory_id.date,
'company_id': inventory_line.inventory_id.company_id.id,
'inventory_id': inventory_line.inventory_id.id,
'state': 'confirmed',
'restrict_lot_id': inventory_line.prod_lot_id.id,
'restrict_partner_id': inventory_line.partner_id.id,
}
inventory_location_id = inventory_line.product_id.property_stock_inventory.id
if diff < 0:
#found more than expected
vals['location_id'] = inventory_location_id
vals['location_dest_id'] = inventory_line.location_id.id
vals['product_uom_qty'] = -diff
else:
#found less than expected
vals['location_id'] = inventory_line.location_id.id
vals['location_dest_id'] = inventory_location_id
vals['product_uom_qty'] = diff
move_id = stock_move_obj.create(cr, uid, vals, context=context)
move = stock_move_obj.browse(cr, uid, move_id, context=context)
if diff > 0:
domain = [('qty', '>', 0.0), ('package_id', '=', inventory_line.package_id.id), ('lot_id', '=', inventory_line.prod_lot_id.id), ('location_id', '=', inventory_line.location_id.id)]
preferred_domain_list = [[('reservation_id', '=', False)], [('reservation_id.inventory_id', '!=', inventory_line.inventory_id.id)]]
quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, move.product_qty, domain=domain, prefered_domain_list=preferred_domain_list, restrict_partner_id=move.restrict_partner_id.id, context=context)
quant_obj.quants_reserve(cr, uid, quants, move, context=context)
elif inventory_line.package_id:
stock_move_obj.action_done(cr, uid, move_id, context=context)
quants = [x.id for x in move.quant_ids]
quant_obj.write(cr, uid, quants, {'package_id': inventory_line.package_id.id}, context=context)
res = quant_obj.search(cr, uid, [('qty', '<', 0.0), ('product_id', '=', move.product_id.id),
('location_id', '=', move.location_dest_id.id), ('package_id', '!=', False)], limit=1, context=context)
if res:
for quant in move.quant_ids:
if quant.location_id.id == move.location_dest_id.id: #To avoid taking a quant that was already reconciled
quant_obj._quant_reconcile_negative(cr, uid, quant, move, context=context)
return move_id
# Should be left out in next version
def restrict_change(self, cr, uid, ids, theoretical_qty, context=None):
return {}
# Should be left out in next version
def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None):
""" Changes UoM
@param product: Changed product_id
@param uom: UoM of the product
@param theoretical_qty: theoretical quantity of the line
@return: Dictionary of changed values
"""
if not product:
return {'value': {'product_uom_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context)
return {'value': {'product_uom_id': uom or obj_product.uom_id.id}}
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
_name = "stock.warehouse"
_description = "Warehouse"
_columns = {
'name': fields.char('Warehouse Name', required=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True),
'partner_id': fields.many2one('res.partner', 'Address'),
'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]),
'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True),
'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"),
'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Default routes through the warehouse'),
'reception_steps': fields.selection([
('one_step', 'Receive goods directly in stock (1 step)'),
('two_steps', 'Unload in input location then go to stock (2 steps)'),
('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments',
help="Default incoming route to follow", required=True),
'delivery_steps': fields.selection([
('ship_only', 'Ship directly from stock (Ship only)'),
('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'),
('pick_pack_ship', 'Make packages in a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shipments',
help="Default outgoing route to follow", required=True),
'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'),
'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'),
'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'),
'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'),
'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'),
'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'),
'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'),
'out_type_id': fields.many2one('stock.picking.type', 'Out Type'),
'in_type_id': fields.many2one('stock.picking.type', 'In Type'),
'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'),
'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'),
'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'),
'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'),
'resupply_from_wh': fields.boolean('Resupply From Other Warehouses'),
'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'),
'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes',
help="Routes will be created for these resupply warehouses and you can select them on products and product categories"),
'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"),
}
def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None):
resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))])
if default_resupply_wh_id: #when the default resupply is being removed, default_resupply_wh_id is not set, so there is nothing to add
resupply_wh_ids.add(default_resupply_wh_id)
resupply_wh_ids = list(resupply_wh_ids)
return {'value': {'resupply_wh_ids': resupply_wh_ids}}
def _get_external_transit_location(self, cr, uid, warehouse, context=None):
''' returns browse record of inter company transit location, if found'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1]
except:
return False
return location_obj.browse(cr, uid, inter_wh_loc, context=context)
def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None):
return {
'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'supplied_wh_id': warehouse.id,
'supplier_wh_id': wh.id,
}
def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None):
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
#create route selectable on the product to resupply the warehouse from another one
external_transit_location = self._get_external_transit_location(cr, uid, warehouse, context=context)
internal_transit_location = warehouse.company_id.internal_transit_location_id
input_loc = warehouse.wh_input_stock_loc_id
if warehouse.reception_steps == 'one_step':
input_loc = warehouse.lot_stock_id
for wh in supplier_warehouses:
transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location
if transit_location:
output_loc = wh.wh_output_stock_loc_id
if wh.delivery_steps == 'ship_only':
output_loc = wh.lot_stock_id
# Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exist)
mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0]
pull_obj.create(cr, uid, mto_pull_vals, context=context)
inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context)
inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context)
values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)]
pull_rules_list = self._get_supply_pull_rules(cr, uid, wh.id, values, inter_wh_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#if the warehouse is also set as default resupply method, assign this route automatically to the warehouse
if default_resupply_wh and default_resupply_wh.id == wh.id:
self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context)
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'reception_steps': 'one_step',
'delivery_steps': 'ship_only',
}
_sql_constraints = [
('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'),
('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'),
]
def _get_partner_locations(self, cr, uid, ids, context=None):
''' returns a tuple made of the browse record of customer location and the browse record of supplier location'''
data_obj = self.pool.get('ir.model.data')
location_obj = self.pool.get('stock.location')
try:
customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1]
supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1]
except:
customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context)
customer_loc = customer_loc and customer_loc[0] or False
supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context)
supplier_loc = supplier_loc and supplier_loc[0] or False
if not (customer_loc and supplier_loc):
raise osv.except_osv(_('Error!'), _('Can\'t find any customer or supplier location.'))
return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context)
def _location_used(self, cr, uid, location_id, warehouse, context=None):
pull_obj = self.pool['procurement.rule']
push_obj = self.pool['stock.location.path']
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_src_id', '=', location_id), ('location_id', '=', location_id)], context=context)
pushs = push_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_from_id', '=', location_id), ('location_dest_id', '=', location_id)], context=context)
if pulls or pushs:
return True
return False
def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
location_obj = self.pool.get('stock.location')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
if warehouse.reception_steps != new_reception_step:
if not self._location_used(cr, uid, warehouse.wh_input_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context)
if new_reception_step != 'one_step':
location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context)
if new_reception_step == 'three_steps':
location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context)
if warehouse.delivery_steps != new_delivery_step:
if not self._location_used(cr, uid, warehouse.wh_output_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id], {'active': False}, context=context)
if not self._location_used(cr, uid, warehouse.wh_pack_stock_loc_id.id, warehouse, context=context):
location_obj.write(cr, uid, [warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context)
if new_delivery_step != 'ship_only':
location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context)
if new_delivery_step == 'pick_pack_ship':
location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context)
return True
def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'product_categ_selectable': True,
'product_selectable': False,
'sequence': 10,
}
def _get_supply_pull_rules(self, cr, uid, supply_warehouse, values, new_route_id, context=None):
pull_rules_list = []
for from_loc, dest_loc, pick_type_id, warehouse in values:
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock', # first part of the resupply route is MTS
'warehouse_id': warehouse.id,
'propagate_warehouse_id': supply_warehouse,
})
return pull_rules_list
def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None):
first_rule = True
push_rules_list = []
pull_rules_list = []
for from_loc, dest_loc, pick_type_id in values:
push_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_from_id': from_loc.id,
'location_dest_id': dest_loc.id,
'route_id': new_route_id,
'auto': 'manual',
'picking_type_id': pick_type_id,
'active': active,
'warehouse_id': warehouse.id,
})
pull_rules_list.append({
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': new_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order',
'active': active,
'warehouse_id': warehouse.id,
})
first_rule = False
return push_rules_list, pull_rules_list
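# Worked example (pick_pack_ship delivery, see get_routes_dict below): the three
# (from, dest, picking type) tuples yield pull rules with procure_method
# 'make_to_stock' for Stock -> Packing Zone, then 'make_to_order' for
# Packing Zone -> Output and Output -> Customers, since only the first rule is MTS.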
def _get_mto_route(self, cr, uid, context=None):
route_obj = self.pool.get('stock.location.route')
data_obj = self.pool.get('ir.model.data')
try:
mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1]
except:
mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context)
mto_route_id = mto_route_id and mto_route_id[0] or False
if not mto_route_id:
raise osv.except_osv(_('Error!'), _('Can\'t find any generic Make To Order route.'))
return mto_route_id
def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None):
""" Checks that the moves from the different """
pull_obj = self.pool.get('procurement.rule')
mto_route_id = self._get_mto_route(cr, uid, context=context)
rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context)
pull_obj.unlink(cr, uid, rules, context=context)
def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None):
mto_route_id = self._get_mto_route(cr, uid, context=context)
res = []
for value in values:
from_loc, dest_loc, pick_type_id = value
res += [{
'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'),
'location_src_id': from_loc.id,
'location_id': dest_loc.id,
'route_id': mto_route_id,
'action': 'move',
'picking_type_id': pick_type_id,
'procure_method': 'make_to_order',
'active': True,
'warehouse_id': warehouse.id,
}]
return res
def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None):
return {
'name': self._format_routename(cr, uid, warehouse, route_name, context=context),
'warehouse_selectable': False,
'product_selectable': True,
'product_categ_selectable': True,
'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step',
'sequence': 20,
}
def create_routes(self, cr, uid, ids, warehouse, context=None):
wh_route_ids = []
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#create reception route and rules
route_name, values = routes_dict[warehouse.reception_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
reception_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, reception_route_id))
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context)
#create the push/pull rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all pull rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTS route and pull rules for delivery and a specific route MTO to be set on the product
route_name, values = routes_dict[warehouse.delivery_steps]
route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context)
#create the route and its pull rules
delivery_route_id = route_obj.create(cr, uid, route_vals, context=context)
wh_route_ids.append((4, delivery_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id, context=context)
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create MTO pull rule and link it to the generic MTO route
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context)
#create a route for cross dock operations, that can be set on products and product categories
route_name, values = routes_dict['crossdock']
crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context)
crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context)
wh_route_ids.append((4, crossdock_route_id))
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context)
for pull_rule in pull_rules_list:
# Cross-dock is logically make-to-order
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#create route selectable on the product to resupply the warehouse from another one
self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context)
#return routes and mto pull rule to store on the warehouse
return {
'route_ids': wh_route_ids,
'mto_pull_id': mto_pull_id,
'reception_route_id': reception_route_id,
'delivery_route_id': delivery_route_id,
'crossdock_route_id': crossdock_route_id,
}
def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None):
picking_type_obj = self.pool.get('stock.picking.type')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
route_obj = self.pool.get('stock.location.route')
new_reception_step = new_reception_step or warehouse.reception_steps
new_delivery_step = new_delivery_step or warehouse.delivery_steps
#change the default source and destination location and (de)activate picking types
input_loc = warehouse.wh_input_stock_loc_id
if new_reception_step == 'one_step':
input_loc = warehouse.lot_stock_id
output_loc = warehouse.wh_output_stock_loc_id
if new_delivery_step == 'ship_only':
output_loc = warehouse.lot_stock_id
picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context)
picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, {
'active': new_delivery_step != 'ship_only',
'default_location_dest_id': output_loc.id if new_delivery_step == 'pick_ship' else warehouse.wh_pack_stock_loc_id.id,
}, context=context)
picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context)
routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context)
#update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context)
route_name, values = routes_dict[new_delivery_step]
route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context)
#create the pull rules
for pull_rule in pull_rules_list:
pull_obj.create(cr, uid, vals=pull_rule, context=context)
#update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it
pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context)
push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context)
route_name, values = routes_dict[new_reception_step]
route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context)
push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context)
#create the push/pull rules
for push_rule in push_rules_list:
push_obj.create(cr, uid, vals=push_rule, context=context)
for pull_rule in pull_rules_list:
#all pull rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location
pull_rule['procure_method'] = 'make_to_order'
pull_obj.create(cr, uid, vals=pull_rule, context=context)
route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context)
#change MTO rule
dummy, values = routes_dict[new_delivery_step]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0]
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context)
return True
def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None):
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
#create new sequences
in_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context)
out_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context)
pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context)
pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context)
int_seq_id = seq_obj.create(cr, SUPERUSER_ID, values={'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context)
wh_stock_loc = warehouse.lot_stock_id
wh_input_stock_loc = warehouse.wh_input_stock_loc_id
wh_output_stock_loc = warehouse.wh_output_stock_loc_id
wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id
#fetch customer and supplier locations, for references
customer_loc, supplier_loc = self._get_partner_locations(cr, uid, warehouse.id, context=context)
#create in, out, internal picking types for warehouse
input_loc = wh_input_stock_loc
if warehouse.reception_steps == 'one_step':
input_loc = wh_stock_loc
output_loc = wh_output_stock_loc
if warehouse.delivery_steps == 'ship_only':
output_loc = wh_stock_loc
#choose the next available color for the picking types of this warehouse
color = 0
available_colors = [c%9 for c in range(3, 12)] # put flashy colors first
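# With range(3, 12) mod 9 the candidate colors are [3, 4, 5, 6, 7, 8, 0, 1, 2];
# colors already used by existing picking types are removed below, preserving order.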
all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color')
#don't use sets to preserve the list order
for x in all_used_colors:
if x['color'] in available_colors:
available_colors.remove(x['color'])
if available_colors:
color = available_colors[0]
#order the picking types with a sequence that gives the following order for each warehouse: reception, internal, pick, pack, ship.
max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc')
max_sequence = max_sequence and max_sequence[0]['sequence'] or 0
in_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Receipts'),
'warehouse_id': warehouse.id,
'code': 'incoming',
'sequence_id': in_seq_id,
'default_location_src_id': supplier_loc.id,
'default_location_dest_id': input_loc.id,
'sequence': max_sequence + 1,
'color': color}, context=context)
out_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Delivery Orders'),
'warehouse_id': warehouse.id,
'code': 'outgoing',
'sequence_id': out_seq_id,
'return_picking_type_id': in_type_id,
'default_location_src_id': output_loc.id,
'default_location_dest_id': customer_loc.id,
'sequence': max_sequence + 4,
'color': color}, context=context)
picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context)
int_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Internal Transfers'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': int_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': wh_stock_loc.id,
'active': True,
'sequence': max_sequence + 2,
'color': color}, context=context)
pack_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pack'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': pack_seq_id,
'default_location_src_id': wh_pack_stock_loc.id,
'default_location_dest_id': output_loc.id,
'active': warehouse.delivery_steps == 'pick_pack_ship',
'sequence': max_sequence + 3,
'color': color}, context=context)
pick_type_id = picking_type_obj.create(cr, uid, vals={
'name': _('Pick'),
'warehouse_id': warehouse.id,
'code': 'internal',
'sequence_id': pick_seq_id,
'default_location_src_id': wh_stock_loc.id,
'default_location_dest_id': output_loc.id if warehouse.delivery_steps == 'pick_ship' else wh_pack_stock_loc.id,
'active': warehouse.delivery_steps != 'ship_only',
'sequence': max_sequence + 2,
'color': color}, context=context)
#write picking types on WH
vals = {
'in_type_id': in_type_id,
'out_type_id': out_type_id,
'pack_type_id': pack_type_id,
'pick_type_id': pick_type_id,
'int_type_id': int_type_id,
}
super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals is None:
vals = {}
data_obj = self.pool.get('ir.model.data')
seq_obj = self.pool.get('ir.sequence')
picking_type_obj = self.pool.get('stock.picking.type')
location_obj = self.pool.get('stock.location')
#create view location for warehouse
loc_vals = {
'name': _(vals.get('code')),
'usage': 'view',
'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context)
vals['view_location_id'] = wh_loc_id
#create all location
def_values = self.default_get(cr, uid, {'reception_steps', 'delivery_steps'})
reception_steps = vals.get('reception_steps', def_values['reception_steps'])
delivery_steps = vals.get('delivery_steps', def_values['delivery_steps'])
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
sub_locations = [
{'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'},
{'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'},
{'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'},
{'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'},
{'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'},
]
for values in sub_locations:
loc_vals = {
'name': values['name'],
'usage': 'internal',
'location_id': wh_loc_id,
'active': values['active'],
}
if vals.get('company_id'):
loc_vals['company_id'] = vals.get('company_id')
location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive)
vals[values['field']] = location_id
#create WH
new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context)
warehouse = self.browse(cr, uid, new_id, context=context)
self.create_sequences_and_picking_types(cr, uid, warehouse, context=context)
#create routes and push/pull rules
new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context)
self.write(cr, uid, warehouse.id, new_objects_dict, context=context)
return new_id
def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None):
return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name
def _format_routename(self, cr, uid, obj, name, context=None):
return obj.name + ': ' + name
def get_routes_dict(self, cr, uid, ids, warehouse, context=None):
#fetch customer and supplier locations, for references
customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context)
return {
'one_step': (_('Receipt in 1 step'), []),
'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]),
'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]),
'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
'pick_pack_ship': (_('Pick + Pack + Ship'), [(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]),
}
def _handle_renaming(self, cr, uid, warehouse, name, code, context=None):
location_obj = self.pool.get('stock.location')
route_obj = self.pool.get('stock.location.route')
pull_obj = self.pool.get('procurement.rule')
push_obj = self.pool.get('stock.location.path')
#rename location
location_id = warehouse.lot_stock_id.location_id.id
location_obj.write(cr, uid, location_id, {'name': code}, context=context)
#rename route and push-pull rules
for route in warehouse.route_ids:
route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context)
for pull in route.pull_ids:
pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context)
for push in route.push_ids:
                push_obj.write(cr, uid, push.id, {'name': push.name.replace(warehouse.name, name, 1)}, context=context)
#change the mto pull rule name
if warehouse.mto_pull_id.id:
pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context)
def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None):
""" Will check if the resupply routes from this warehouse follow the changes of number of delivery steps """
#Check routes that are being delivered by this warehouse and change the rule going to transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context)
pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_src_id': new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context)
# Create or clean MTO rules
mto_route_id = self._get_mto_route(cr, uid, context=context)
if not change_to_multiple:
# If single delivery we should create the necessary MTO rules for the resupply
# pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
pull_recs = pull_obj.browse(cr, uid, pulls, context=context)
transfer_locs = list(set([x.location_id for x in pull_recs]))
            vals = [(warehouse.lot_stock_id, x, warehouse.out_type_id.id) for x in transfer_locs]
mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context)
for mto_pull_val in mto_pull_vals:
pull_obj.create(cr, uid, mto_pull_val, context=context)
else:
# We need to delete all the MTO pull rules, otherwise they risk to be used in the system
pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context)
if pulls:
pull_obj.unlink(cr, uid, pulls, context=context)
def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None):
"""
Will check if the resupply routes to this warehouse follow the changes of number of receipt steps
"""
#Check routes that are being delivered by this warehouse and change the rule coming from transit location
route_obj = self.pool.get("stock.location.route")
pull_obj = self.pool.get("procurement.rule")
routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context)
        pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')], context=context)
if pulls:
pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context)
def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None):
if reception_new:
old_val = warehouse.reception_steps
new_val = reception_new
change_to_one = (old_val != 'one_step' and new_val == 'one_step')
change_to_multiple = (old_val == 'one_step' and new_val != 'one_step')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_input_stock_loc_id.id
self._check_reception_resupply(cr, uid, warehouse, new_location, context=context)
if delivery_new:
old_val = warehouse.delivery_steps
new_val = delivery_new
change_to_one = (old_val != 'ship_only' and new_val == 'ship_only')
change_to_multiple = (old_val == 'ship_only' and new_val != 'ship_only')
if change_to_one or change_to_multiple:
new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_output_stock_loc_id.id
self._check_delivery_resupply(cr, uid, warehouse, new_location, change_to_multiple, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
seq_obj = self.pool.get('ir.sequence')
route_obj = self.pool.get('stock.location.route')
context_with_inactive = context.copy()
context_with_inactive['active_test'] = False
for warehouse in self.browse(cr, uid, ids, context=context_with_inactive):
#first of all, check if we need to delete and recreate route
if vals.get('reception_steps') or vals.get('delivery_steps'):
#activate and deactivate location according to reception and delivery option
self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context)
# switch between route
self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive)
# Check if we need to change something to resupply warehouses and associated MTO rules
self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context)
if vals.get('code') or vals.get('name'):
name = warehouse.name
#rename sequence
if vals.get('name'):
name = vals.get('name', warehouse.name)
self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive)
if warehouse.in_type_id:
                    seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '\\IN\\'}, context=context)
                    seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '\\OUT\\'}, context=context)
                    seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '\\PACK\\'}, context=context)
                    seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '\\PICK\\'}, context=context)
                    seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '\\INT\\'}, context=context)
if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'):
for cmd in vals.get('resupply_wh_ids'):
if cmd[0] == 6:
new_ids = set(cmd[2])
old_ids = set([wh.id for wh in warehouse.resupply_wh_ids])
to_add_wh_ids = new_ids - old_ids
if to_add_wh_ids:
supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context)
self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context)
to_remove_wh_ids = old_ids - new_ids
if to_remove_wh_ids:
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context)
if to_remove_route_ids:
route_obj.unlink(cr, uid, to_remove_route_ids, context=context)
else:
#not implemented
pass
if 'default_resupply_wh_id' in vals:
if vals.get('default_resupply_wh_id') == warehouse.id:
raise osv.except_osv(_('Warning'),_('The default resupply warehouse should be different than the warehouse itself!'))
if warehouse.default_resupply_wh_id:
#remove the existing resupplying route on the warehouse
to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context)
for inter_wh_route_id in to_remove_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})
if vals.get('default_resupply_wh_id'):
#assign the new resupplying route on all products
to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context)
for inter_wh_route_id in to_assign_route_ids:
self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context)
def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
route_obj = self.pool.get("stock.location.route")
all_routes = [route.id for route in warehouse.route_ids]
all_routes += route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context)
all_routes += [warehouse.mto_pull_id.route_id.id]
return all_routes
def view_all_routes_for_wh(self, cr, uid, ids, context=None):
all_routes = []
for wh in self.browse(cr, uid, ids, context=context):
all_routes += self.get_all_routes_for_wh(cr, uid, wh, context=context)
domain = [('id', 'in', all_routes)]
return {
'name': _('Warehouse\'s Routes'),
'domain': domain,
'res_model': 'stock.location.route',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'tree,form',
'view_type': 'form',
'limit': 20
}
class stock_location_path(osv.osv):
_name = "stock.location.path"
_description = "Pushed Flows"
_order = "name"
def _get_rules(self, cr, uid, ids, context=None):
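        # store trigger: map the modified routes to the push rules (paths) whose route_sequence must be recomputed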
res = []
for route in self.browse(cr, uid, ids, context=context):
res += [x.id for x in route.push_ids]
return res
_columns = {
'name': fields.char('Operation Name', required=True),
'company_id': fields.many2one('res.company', 'Company'),
'route_id': fields.many2one('stock.location.route', 'Route'),
'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True),
'delay': fields.integer('Delay (days)', help="Number of days to do this transition"),
'picking_type_id': fields.many2one('stock.picking.type', 'Type of the new Operation', required=True, help="This is the picking type associated with the different pickings"),
'auto': fields.selection(
[('auto','Automatic Move'), ('manual','Manual Operation'),('transparent','Automatic No Step Added')],
'Automatic Move',
required=True, select=1,
help="This is used to define paths the product has to follow within the location tree.\n" \
"The 'Automatic Move' value will create a stock move after the current one that will be "\
"validated automatically. With 'Manual Operation', the stock move has to be validated "\
"by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
),
'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this move will too'),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
store={
'stock.location.route': (_get_rules, ['sequence'], 10),
'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
}),
'sequence': fields.integer('Sequence'),
}
_defaults = {
'auto': 'auto',
'delay': 0,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c),
'propagate': True,
'active': True,
}
def _prepare_push_apply(self, cr, uid, rule, move, context=None):
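        # values for the chained move created by this push rule: it starts at the current move's destination and its dates are shifted by the rule delay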
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return {
'origin': move.origin or move.picking_id.name or "/",
'location_id': move.location_dest_id.id,
'location_dest_id': rule.location_dest_id.id,
'date': newdate,
'company_id': rule.company_id and rule.company_id.id or False,
'date_expected': newdate,
'picking_id': False,
'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False,
'propagate': rule.propagate,
'push_rule_id': rule.id,
'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False,
}
def _apply(self, cr, uid, rule, move, context=None):
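        # 'transparent' rules simply retarget the destination of the existing move,
        # while 'auto'/'manual' rules chain a new move after it and confirm it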
move_obj = self.pool.get('stock.move')
newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
if rule.auto == 'transparent':
old_dest_location = move.location_dest_id.id
move_obj.write(cr, uid, [move.id], {
'date': newdate,
'date_expected': newdate,
'location_dest_id': rule.location_dest_id.id
})
#avoid looping if a push rule is not well configured
if rule.location_dest_id.id != old_dest_location:
#call again push_apply to see if a next step is defined
move_obj._push_apply(cr, uid, [move], context=context)
else:
vals = self._prepare_push_apply(cr, uid, rule, move, context=context)
move_id = move_obj.copy(cr, uid, move.id, vals, context=context)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': move_id,
})
move_obj.action_confirm(cr, uid, [move_id], context=None)
# -------------------------
# Packaging related stuff
# -------------------------
from openerp.report import report_sxw
class stock_package(osv.osv):
"""
These are the packages, containing quants and/or other packages
"""
_name = "stock.quant.package"
_description = "Physical Packages"
_parent_name = "parent_id"
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
def name_get(self, cr, uid, ids, context=None):
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
res[m.id] = m.name
parent = m.parent_id
while parent:
res[m.id] = parent.name + ' / ' + res[m.id]
parent = parent.parent_id
return res
def _get_packages(self, cr, uid, ids, context=None):
"""Returns packages from quants for store"""
res = set()
for quant in self.browse(cr, uid, ids, context=context):
pack = quant.package_id
while pack:
res.add(pack.id)
pack = pack.parent_id
return list(res)
def _get_package_info(self, cr, uid, ids, name, args, context=None):
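        # the location, owner and company of a package are taken from the first quant found inside it (sub-packages included)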
quant_obj = self.pool.get("stock.quant")
default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids)
for pack in self.browse(cr, uid, ids, context=context):
quants = quant_obj.search(cr, uid, [('package_id', 'child_of', pack.id)], context=context)
if quants:
quant = quant_obj.browse(cr, uid, quants[0], context=context)
res[pack.id]['location_id'] = quant.location_id.id
res[pack.id]['owner_id'] = quant.owner_id.id
res[pack.id]['company_id'] = quant.company_id.id
else:
res[pack.id]['location_id'] = False
res[pack.id]['owner_id'] = False
res[pack.id]['company_id'] = False
return res
def _get_packages_to_relocate(self, cr, uid, ids, context=None):
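        # store trigger: recompute a package and its direct parent when its quants, children or parent change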
res = set()
for pack in self.browse(cr, uid, ids, context=context):
res.add(pack.id)
if pack.parent_id:
res.add(pack.parent_id.id)
return list(res)
_columns = {
'name': fields.char('Package Reference', select=True, copy=False),
'complete_name': fields.function(_complete_name, type='char', string="Package Name",),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package share the same product, otherwise it doesn't really makes sense.", select=True),
'ul_id': fields.many2one('product.ul', 'Logistic Unit'),
'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package",
store={
'stock.quant': (_get_packages, ['location_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True),
'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True),
'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True),
'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package",
store={
'stock.quant': (_get_packages, ['company_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package",
store={
'stock.quant': (_get_packages, ['owner_id'], 10),
'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
}, readonly=True, select=True),
}
_defaults = {
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.quant.package') or _('Unknown Pack')
}
def _check_location_constraint(self, cr, uid, packs, context=None):
'''checks that all quants in a package are stored in the same location. This function cannot be used
as a constraint because it needs to be checked on pack operations (they may not call write on the
package)
'''
quant_obj = self.pool.get('stock.quant')
for pack in packs:
parent = pack
while parent.parent_id:
parent = parent.parent_id
quant_ids = self.get_content(cr, uid, [parent.id], context=context)
quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0]
location_id = quants and quants[0].location_id.id or False
            if not all(quant.location_id.id == location_id for quant in quants):
raise osv.except_osv(_('Error'), _('Everything inside a package should be in the same location'))
return True
def action_print(self, cr, uid, ids, context=None):
context = dict(context or {}, active_ids=ids)
return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context)
def unpack(self, cr, uid, ids, context=None):
quant_obj = self.pool.get('stock.quant')
for package in self.browse(cr, uid, ids, context=context):
quant_ids = [quant.id for quant in package.quant_ids]
quant_obj.write(cr, uid, quant_ids, {'package_id': package.parent_id.id or False}, context=context)
children_package_ids = [child_package.id for child_package in package.children_ids]
self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context)
#delete current package since it contains nothing anymore
self.unlink(cr, uid, ids, context=context)
return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context)
def get_content(self, cr, uid, ids, context=None):
child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context)
def get_content_package(self, cr, uid, ids, context=None):
quants_ids = self.get_content(cr, uid, ids, context=context)
res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'quantsact', context=context)
res['domain'] = [('id', 'in', quants_ids)]
return res
def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None):
''' find the total of given product 'product_id' inside the given package 'package_id'''
quant_obj = self.pool.get('stock.quant')
all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context)
total = 0
for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context):
if quant.product_id.id == product_id:
total += quant.qty
return total
def _get_all_products_quantities(self, cr, uid, package_id, context=None):
'''This function computes the different product quantities for the given package
'''
quant_obj = self.pool.get('stock.quant')
res = {}
for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)):
if quant.product_id.id not in res:
res[quant.product_id.id] = 0
res[quant.product_id.id] += quant.qty
return res
def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None):
stock_pack_operation_obj = self.pool.get('stock.pack.operation')
if default is None:
default = {}
new_package_id = self.copy(cr, uid, id, default_pack_values, context=context)
default['result_package_id'] = new_package_id
op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context)
for op_id in op_ids:
            stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context)
        return new_package_id
class stock_pack_operation(osv.osv):
_name = "stock.pack.operation"
_description = "Packing Operation"
def _get_remaining_prod_quantities(self, cr, uid, operation, context=None):
'''Get the remaining quantities per product on an operation with a package. This function returns a dictionary'''
#if the operation doesn't concern a package, it's not relevant to call this function
if not operation.package_id or operation.product_id:
return {operation.product_id.id: operation.remaining_qty}
#get the total of products the package contains
res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context)
#reduce by the quantities linked to a move
for record in operation.linked_move_operation_ids:
if record.move_id.product_id.id not in res:
res[record.move_id.product_id.id] = 0
res[record.move_id.product_id.id] -= record.qty
return res
def _get_remaining_qty(self, cr, uid, ids, name, args, context=None):
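        # remaining qty = planned quantity converted to the product default UoM, minus the quantities already linked to stock moves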
uom_obj = self.pool.get('product.uom')
res = {}
for ops in self.browse(cr, uid, ids, context=context):
res[ops.id] = 0
if ops.package_id and not ops.product_id:
                #don't try to compute the remaining quantity for packages because it's not relevant (a package could include different products).
#should use _get_remaining_prod_quantities instead
continue
else:
qty = ops.product_qty
if ops.product_uom_id:
qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
for record in ops.linked_move_operation_ids:
qty -= record.qty
res[ops.id] = float_round(qty, precision_rounding=ops.product_id.uom_id.rounding)
return res
def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context)
if product_id and not product_uom_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
res['value']['product_uom_id'] = product.uom_id.id
return res
def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
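        # warn when the chosen UoM is not in the same category as the product UoM, or when the quantity does not respect the UoM rounding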
res = {'value': {}}
uom_obj = self.pool.get('product.uom')
if product_id:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
product_uom_id = product_uom_id or product.uom_id.id
selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context)
if selected_uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {
'title': _('Warning: wrong UoM!'),
'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose an UoM within the same UoM category.') % (product.name)
}
if product_qty and 'warning' not in res:
rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True)
if rounded_qty != product_qty:
res['warning'] = {
'title': _('Warning: wrong quantity!'),
'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name)
}
return res
_columns = {
'picking_id': fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True),
'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"), # 1
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'qty_done': fields.float('Quantity Processed', digits_compute=dp.get_precision('Product Unit of Measure')),
'package_id': fields.many2one('stock.quant.package', 'Source Package'), # 2
'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'),
'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'),
'date': fields.datetime('Date', required=True),
'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"),
#'update_cost': fields.boolean('Need cost update'),
'cost': fields.float("Cost", help="Unit Cost for this product line"),
'currency': fields.many2one('res.currency', string="Currency", help="Currency in which Unit cost is expressed", ondelete='CASCADE'),
'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'),
'remaining_qty': fields.function(_get_remaining_qty, type='float', digits = 0, string="Remaining Qty", help="Remaining quantity in default UoM according to moves matched with this operation. "),
'location_id': fields.many2one('stock.location', 'Source Location', required=True),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True),
'processed': fields.selection([('true','Yes'), ('false','No')],'Has been processed?', required=True),
}
_defaults = {
'date': fields.date.context_today,
'qty_done': 0,
'processed': lambda *a: 'false',
}
def write(self, cr, uid, ids, vals, context=None):
context = context or {}
res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get("no_recompute"):
pickings = vals.get('picking_id') and [vals['picking_id']] or list(set([x.picking_id.id for x in self.browse(cr, uid, ids, context=context)]))
self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, pickings, context=context)
return res
def create(self, cr, uid, vals, context=None):
context = context or {}
res_id = super(stock_pack_operation, self).create(cr, uid, vals, context=context)
if vals.get("picking_id") and not context.get("no_recompute"):
self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, [vals['picking_id']], context=context)
return res_id
def action_drop_down(self, cr, uid, ids, context=None):
''' Used by barcode interface to say that pack_operation has been moved from src location
to destination location, if qty_done is less than product_qty than we have to split the
operation in two to process the one with the qty moved
'''
processed_ids = []
move_obj = self.pool.get("stock.move")
        for pack_op in self.browse(cr, uid, ids, context=context):
if pack_op.product_id and pack_op.location_id and pack_op.location_dest_id:
move_obj.check_tracking_product(cr, uid, pack_op.product_id, pack_op.lot_id.id, pack_op.location_id, pack_op.location_dest_id, context=context)
op = pack_op.id
if pack_op.qty_done < pack_op.product_qty:
# we split the operation in two
op = self.copy(cr, uid, pack_op.id, {'product_qty': pack_op.qty_done, 'qty_done': pack_op.qty_done}, context=context)
self.write(cr, uid, [pack_op.id], {'product_qty': pack_op.product_qty - pack_op.qty_done, 'qty_done': 0, 'lot_id': False}, context=context)
processed_ids.append(op)
self.write(cr, uid, processed_ids, {'processed': 'true'}, context=context)
def create_and_assign_lot(self, cr, uid, id, name, context=None):
''' Used by barcode interface to create a new lot and assign it to the operation
'''
        obj = self.browse(cr, uid, id, context=context)
product_id = obj.product_id.id
val = {'product_id': product_id}
new_lot_id = False
if name:
lots = self.pool.get('stock.production.lot').search(cr, uid, ['&', ('name', '=', name), ('product_id', '=', product_id)], context=context)
if lots:
new_lot_id = lots[0]
val.update({'name': name})
if not new_lot_id:
new_lot_id = self.pool.get('stock.production.lot').create(cr, uid, val, context=context)
self.write(cr, uid, id, {'lot_id': new_lot_id}, context=context)
def _search_and_increment(self, cr, uid, picking_id, domain, filter_visible=False, visible_op_ids=False, increment=True, context=None):
'''Search for an operation with given 'domain' in a picking, if it exists increment the qty (+1) otherwise create it
:param domain: list of tuple directly reusable as a domain
context can receive a key 'current_package_id' with the package to consider for this operation
returns True
'''
if context is None:
context = {}
#if current_package_id is given in the context, we increase the number of items in this package
package_clause = [('result_package_id', '=', context.get('current_package_id', False))]
existing_operation_ids = self.search(cr, uid, [('picking_id', '=', picking_id)] + domain + package_clause, context=context)
todo_operation_ids = []
if existing_operation_ids:
if filter_visible:
todo_operation_ids = [val for val in existing_operation_ids if val in visible_op_ids]
else:
todo_operation_ids = existing_operation_ids
if todo_operation_ids:
#existing operation found for the given domain and picking => increment its quantity
operation_id = todo_operation_ids[0]
op_obj = self.browse(cr, uid, operation_id, context=context)
qty = op_obj.qty_done
if increment:
qty += 1
else:
qty -= 1 if qty >= 1 else 0
if qty == 0 and op_obj.product_qty == 0:
#we have a line with 0 qty set, so delete it
self.unlink(cr, uid, [operation_id], context=context)
return False
self.write(cr, uid, [operation_id], {'qty_done': qty}, context=context)
else:
#no existing operation found for the given domain and picking => create a new one
picking_obj = self.pool.get("stock.picking")
picking = picking_obj.browse(cr, uid, picking_id, context=context)
values = {
'picking_id': picking_id,
'product_qty': 0,
'location_id': picking.location_id.id,
'location_dest_id': picking.location_dest_id.id,
'qty_done': 1,
}
for key in domain:
var_name, dummy, value = key
uom_id = False
if var_name == 'product_id':
uom_id = self.pool.get('product.product').browse(cr, uid, value, context=context).uom_id.id
update_dict = {var_name: value}
if uom_id:
update_dict['product_uom_id'] = uom_id
values.update(update_dict)
operation_id = self.create(cr, uid, values, context=context)
return operation_id
class stock_move_operation_link(osv.osv):
"""
Table making the link between stock.moves and stock.pack.operations to compute the remaining quantities on each of these objects
"""
_name = "stock.move.operation.link"
_description = "Link between stock moves and pack operations"
_columns = {
'qty': fields.float('Quantity', help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and inverse). Given in the product main uom."),
'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"),
'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"),
'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"),
}
def get_specific_domain(self, cr, uid, record, context=None):
'''Returns the specific domain to consider for quant selection in action_assign() or action_done() of stock.move,
having the record given as parameter making the link between the stock move and a pack operation'''
op = record.operation_id
domain = []
if op.package_id and op.product_id:
#if removing a product from a box, we restrict the choice of quants to this box
domain.append(('package_id', '=', op.package_id.id))
elif op.package_id:
#if moving a box, we allow to take everything from inside boxes as well
domain.append(('package_id', 'child_of', [op.package_id.id]))
else:
#if not given any information about package, we don't open boxes
domain.append(('package_id', '=', False))
#if lot info is given, we restrict choice to this lot otherwise we can take any
if op.lot_id:
domain.append(('lot_id', '=', op.lot_id.id))
#if owner info is given, we restrict to this owner otherwise we restrict to no owner
if op.owner_id:
domain.append(('owner_id', '=', op.owner_id.id))
else:
domain.append(('owner_id', '=', False))
return domain
class stock_warehouse_orderpoint(osv.osv):
"""
Defines Minimum stock rules.
"""
_name = "stock.warehouse.orderpoint"
_description = "Minimum Inventory Rule"
def subtract_procurements(self, cr, uid, orderpoint, context=None):
'''This function returns quantity of product that needs to be deducted from the orderpoint computed quantity because there's already a procurement created with aim to fulfill it.
'''
qty = 0
uom_obj = self.pool.get("product.uom")
for procurement in orderpoint.procurement_ids:
if procurement.state in ('cancel', 'done'):
continue
procurement_qty = uom_obj._compute_qty_obj(cr, uid, procurement.product_uom, procurement.product_qty, procurement.product_id.uom_id, context=context)
for move in procurement.move_ids:
#need to add the moves in draft as they aren't in the virtual quantity + moves that have not been created yet
                if move.state not in ('draft',):
#if move is already confirmed, assigned or done, the virtual stock is already taking this into account so it shouldn't be deducted
procurement_qty -= move.product_qty
qty += procurement_qty
return qty
def _check_product_uom(self, cr, uid, ids, context=None):
'''
Check if the UoM has the same category as the product standard UoM
'''
if not context:
context = {}
for rule in self.browse(cr, uid, ids, context=context):
if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id:
return False
return True
def action_view_proc_to_process(self, cr, uid, ids, context=None):
act_obj = self.pool.get('ir.actions.act_window')
mod_obj = self.pool.get('ir.model.data')
proc_ids = self.pool.get('procurement.order').search(cr, uid, [('orderpoint_id', 'in', ids), ('state', 'not in', ('done', 'cancel'))], context=context)
result = mod_obj.get_object_reference(cr, uid, 'procurement', 'do_view_procurements')
if not result:
return False
result = act_obj.read(cr, uid, [result[1]], context=context)[0]
result['domain'] = "[('id', 'in', [" + ','.join(map(str, proc_ids)) + "])]"
return result
_columns = {
'name': fields.char('Name', required=True, copy=False),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
'logic': fields.selection([('max', 'Order to Max'), ('price', 'Best price (not yet active!)')], 'Reordering Mode', required=True),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]),
'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True),
'product_min_qty': fields.float('Minimum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "\
"a procurement to bring the forecasted quantity to the Max Quantity."),
'product_max_qty': fields.float('Maximum Quantity', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="When the virtual stock goes below the Min Quantity, Odoo generates "\
"a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
'qty_multiple': fields.float('Qty Multiple', required=True,
digits_compute=dp.get_precision('Product Unit of Measure'),
help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used. "),
'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'),
'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': lambda *a: 1,
'logic': lambda *a: 'max',
'qty_multiple': lambda *a: 1,
'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.orderpoint') or '',
'product_uom': lambda self, cr, uid, context: context.get('product_uom', False),
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context)
}
_sql_constraints = [
('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
]
_constraints = [
(_check_product_uom, 'You have to select a product unit of measure in the same category than the default unit of measure of the product', ['product_id', 'product_uom']),
]
def default_get(self, cr, uid, fields, context=None):
warehouse_obj = self.pool.get('stock.warehouse')
res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
# default 'warehouse_id' and 'location_id'
if 'warehouse_id' not in res:
warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or []
res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or False
if 'location_id' not in res:
res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False
return res
def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
""" Finds location id for changed warehouse.
@param warehouse_id: Changed id of warehouse.
@return: Dictionary of values.
"""
if warehouse_id:
w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
v = {'location_id': w.lot_stock_id.id}
return {'value': v}
return {}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM for changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]}
v = {'product_uom': prod.uom_id.id}
return {'value': v, 'domain': d}
return {'domain': {'product_uom': []}}
class stock_picking_type(osv.osv):
_name = "stock.picking.type"
_description = "The picking type determines the picking view"
_order = 'sequence'
def open_barcode_interface(self, cr, uid, ids, context=None):
        final_url = "/barcode/web/#action=stock.ui&picking_type_id=" + (str(ids[0]) if len(ids) else '0')
return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'}
def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None):
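        # JSON-encoded status of the last 10 done pickings of each type (-1 late, 0 backorder exists, 1 ok), shown in the kanban view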
picking_obj = self.pool.get('stock.picking')
res = {}
for picking_type_id in ids:
#get last 10 pickings of this type
picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context)
tristates = []
for picking in picking_obj.browse(cr, uid, picking_ids, context=context):
if picking.date_done > picking.date:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Late'), 'value': -1})
                elif picking.backorder_id:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('Backorder exists'), 'value': 0})
                else:
                    tristates.insert(0, {'tooltip': (picking.name or '') + ": " + _('OK'), 'value': 1})
res[picking_type_id] = json.dumps(tristates)
return res
def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None):
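        # count the pickings of each type per state with read_group, then derive the late and backorder rates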
obj = self.pool.get('stock.picking')
domains = {
'count_picking_draft': [('state', '=', 'draft')],
'count_picking_waiting': [('state', '=', 'confirmed')],
'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))],
'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))],
'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))],
}
result = {}
for field in domains:
data = obj.read_group(cr, uid, domains[field] +
[('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)],
['picking_type_id'], ['picking_type_id'], context=context)
count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data))
for tid in ids:
result.setdefault(tid, {})[field] = count.get(tid, 0)
for tid in ids:
if result[tid]['count_picking']:
result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking']
result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking']
else:
result[tid]['rate_picking_late'] = 0
result[tid]['rate_picking_backorders'] = 0
return result
def onchange_picking_code(self, cr, uid, ids, picking_code=False):
if not picking_code:
return False
obj_data = self.pool.get('ir.model.data')
stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock')
result = {
'default_location_src_id': stock_loc,
'default_location_dest_id': stock_loc,
}
if picking_code == 'incoming':
result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers')
elif picking_code == 'outgoing':
result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_customers')
return {'value': result}
def _get_name(self, cr, uid, ids, field_names, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
def name_get(self, cr, uid, ids, context=None):
"""Overides orm name_get method to display 'Warehouse_name: PickingType_name' """
if context is None:
context = {}
if not isinstance(ids, list):
ids = [ids]
res = []
if not ids:
return res
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if record.warehouse_id:
                name = record.warehouse_id.name + ': ' + name
if context.get('special_shortened_wh_name'):
if record.warehouse_id:
name = record.warehouse_id.name
else:
name = _('Customer') + ' (' + record.name + ')'
res.append((record.id, name))
return res
def _default_warehouse(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context)
res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
return res and res[0] or False
_columns = {
'name': fields.char('Picking Type Name', translate=True, required=True),
'complete_name': fields.function(_get_name, type='char', string='Name'),
'color': fields.integer('Color'),
'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"),
'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True),
'default_location_src_id': fields.many2one('stock.location', 'Default Source Location'),
'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location'),
'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True),
'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'),
'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'),
'active': fields.boolean('Active'),
# Statistics for the kanban view
'last_done_picking': fields.function(_get_tristate_values,
type='char',
string='Last 10 Done Pickings'),
'count_picking_draft': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_ready': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_waiting': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'count_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_late': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
'rate_picking_backorders': fields.function(_get_picking_count,
type='integer', multi='_get_picking_count'),
}
_defaults = {
'warehouse_id': _default_warehouse,
'active': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| oliverhr/odoo | addons/stock/stock.py | Python | agpl-3.0 | 269,349 |
import datetime
import sqlalchemy.orm.exc
from nylas.logging import get_logger
log = get_logger()
from inbox.auth.oauth import OAuthAuthHandler
from inbox.basicauth import OAuthError
from inbox.models import Namespace
from inbox.config import config
from inbox.models.backends.outlook import OutlookAccount
from inbox.models.backends.oauth import token_manager
from inbox.util.url import url_concat
PROVIDER = '_outlook'
AUTH_HANDLER_CLS = '_OutlookAuthHandler'
# Outlook OAuth app credentials
OAUTH_CLIENT_ID = config.get_required('MS_LIVE_OAUTH_CLIENT_ID')
OAUTH_CLIENT_SECRET = config.get_required('MS_LIVE_OAUTH_CLIENT_SECRET')
OAUTH_REDIRECT_URI = config.get_required('MS_LIVE_OAUTH_REDIRECT_URI')
OAUTH_AUTHENTICATE_URL = 'https://login.live.com/oauth20_authorize.srf'
OAUTH_ACCESS_TOKEN_URL = 'https://login.live.com/oauth20_token.srf'
OAUTH_USER_INFO_URL = 'https://apis.live.net/v5.0/me'
OAUTH_BASE_URL = 'https://apis.live.net/v5.0/'
OAUTH_SCOPE = ' '.join([
'wl.basic', # Read access for basic profile info + contacts
'wl.offline_access', # ability to read / update user's info at any time
'wl.emails', # Read access to user's email addresses
'wl.imap']) # R/W access to user's email using IMAP / SMTP
class _OutlookAuthHandler(OAuthAuthHandler):
OAUTH_CLIENT_ID = OAUTH_CLIENT_ID
OAUTH_CLIENT_SECRET = OAUTH_CLIENT_SECRET
OAUTH_REDIRECT_URI = OAUTH_REDIRECT_URI
OAUTH_AUTHENTICATE_URL = OAUTH_AUTHENTICATE_URL
OAUTH_ACCESS_TOKEN_URL = OAUTH_ACCESS_TOKEN_URL
OAUTH_USER_INFO_URL = OAUTH_USER_INFO_URL
OAUTH_BASE_URL = OAUTH_BASE_URL
OAUTH_SCOPE = OAUTH_SCOPE
def create_account(self, db_session, email_address, response):
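        # reuse the existing account for this email address if there is one, otherwise create a new namespace and account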
email_address = response.get('emails')['account']
try:
account = db_session.query(OutlookAccount).filter_by(
email_address=email_address).one()
except sqlalchemy.orm.exc.NoResultFound:
namespace = Namespace()
account = OutlookAccount(namespace=namespace)
account.refresh_token = response['refresh_token']
account.date = datetime.datetime.utcnow()
tok = response.get('access_token')
expires_in = response.get('expires_in')
token_manager.cache_token(account, tok, expires_in)
account.scope = response.get('scope')
account.email_address = email_address
account.o_id_token = response.get('user_id')
account.o_id = response.get('id')
account.name = response.get('name')
account.gender = response.get('gender')
account.link = response.get('link')
account.locale = response.get('locale')
# Unlike Gmail, Outlook doesn't return the client_id and secret here
account.client_id = OAUTH_CLIENT_ID
account.client_secret = OAUTH_CLIENT_SECRET
# Ensure account has sync enabled.
account.enable_sync()
return account
def validate_token(self, access_token):
return self._get_user_info(access_token)
def interactive_auth(self, email_address=None):
url_args = {'redirect_uri': self.OAUTH_REDIRECT_URI,
'client_id': self.OAUTH_CLIENT_ID,
'response_type': 'code',
'scope': self.OAUTH_SCOPE,
'access_type': 'offline'}
url = url_concat(self.OAUTH_AUTHENTICATE_URL, url_args)
print ('Please visit the following url to allow access to this '
'application. The response will provide '
'code=[AUTHORIZATION_CODE]&lc=XXXX in the location. Paste the'
' AUTHORIZATION_CODE here:')
print '\n{}'.format(url)
while True:
auth_code = raw_input('Enter authorization code: ').strip()
try:
auth_response = self._get_authenticated_user(auth_code)
return auth_response
except OAuthError:
print '\nInvalid authorization code, try again...\n'
auth_code = None
| Eagles2F/sync-engine | inbox/auth/_outlook.py | Python | agpl-3.0 | 4,066 |
#!/usr/bin/env python
'''
Copyright (C) 2005 Aaron Spike, [email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import random, math, inkex, cubicsuperpath
def randomize((x, y), rx, ry, norm):
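    # displace the point by the random distance r in a random direction a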
if norm:
r = abs(random.normalvariate(0.0,0.5*max(rx, ry)))
else:
r = random.uniform(0.0,max(rx, ry))
a = random.uniform(0.0,2*math.pi)
    x += math.cos(a)*r
    y += math.sin(a)*r
return [x, y]
class RadiusRandomize(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("--title")
self.OptionParser.add_option("-x", "--radiusx",
action="store", type="float",
dest="radiusx", default=10.0,
help="Randomly move nodes and handles within this radius, X")
self.OptionParser.add_option("-y", "--radiusy",
action="store", type="float",
dest="radiusy", default=10.0,
help="Randomly move nodes and handles within this radius, Y")
self.OptionParser.add_option("-c", "--ctrl",
action="store", type="inkbool",
dest="ctrl", default=True,
help="Randomize control points")
self.OptionParser.add_option("-e", "--end",
action="store", type="inkbool",
dest="end", default=True,
help="Randomize nodes")
self.OptionParser.add_option("-n", "--norm",
action="store", type="inkbool",
dest="norm", default=True,
help="Use normal distribution")
def effect(self):
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
d = node.get('d')
p = cubicsuperpath.parsePath(d)
for subpath in p:
for csp in subpath:
if self.options.end:
delta=randomize([0,0], self.options.radiusx, self.options.radiusy, self.options.norm)
csp[0][0]+=delta[0]
csp[0][1]+=delta[1]
csp[1][0]+=delta[0]
csp[1][1]+=delta[1]
csp[2][0]+=delta[0]
csp[2][1]+=delta[1]
if self.options.ctrl:
csp[0]=randomize(csp[0], self.options.radiusx, self.options.radiusy, self.options.norm)
csp[2]=randomize(csp[2], self.options.radiusx, self.options.radiusy, self.options.norm)
node.set('d',cubicsuperpath.formatPath(p))
if __name__ == '__main__':
e = RadiusRandomize()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 encoding=utf-8 textwidth=99
| step21/inkscape-osx-packaging-native | packaging/macosx/Inkscape.app/Contents/Resources/extensions/radiusrand.py | Python | lgpl-2.1 | 3,583 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Vasp(MakefilePackage):
"""
The Vienna Ab initio Simulation Package (VASP)
is a computer program for atomic scale materials modelling,
e.g. electronic structure calculations
and quantum-mechanical molecular dynamics, from first principles.
"""
homepage = "https://vasp.at"
url = "file://{0}/vasp.5.4.4.pl2.tgz".format(os.getcwd())
manual_download = True
version('6.2.0', sha256='49e7ba351bd634bc5f5f67a8ef1e38e64e772857a1c02f602828898a84197e25')
version('6.1.1', sha256='e37a4dfad09d3ad0410833bcd55af6b599179a085299026992c2d8e319bf6927')
version('5.4.4.pl2', sha256='98f75fd75399a23d76d060a6155f4416b340a1704f256a00146f89024035bc8e')
version('5.4.4', sha256='5bd2449462386f01e575f9adf629c08cb03a13142806ffb6a71309ca4431cfb3')
resource(name='vaspsol',
git='https://github.com/henniggroup/VASPsol.git',
tag='V1.0',
when='+vaspsol')
variant('openmp', default=False,
description='Enable openmp build')
variant('scalapack', default=False,
description='Enables build with SCALAPACK')
variant('cuda', default=False,
description='Enables running on Nvidia GPUs')
variant('vaspsol', default=False,
description='Enable VASPsol implicit solvation model\n'
'https://github.com/henniggroup/VASPsol')
depends_on('rsync', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('fftw-api')
depends_on('mpi', type=('build', 'link', 'run'))
depends_on('scalapack', when='+scalapack')
depends_on('cuda', when='+cuda')
depends_on('qd', when='%nvhpc')
conflicts('%gcc@:8', msg='GFortran before 9.x does not support all features needed to build VASP')
conflicts('+vaspsol', when='+cuda', msg='+vaspsol only available for CPU')
conflicts('+openmp', when='@:6.1.1', msg='openmp support started from 6.2')
parallel = False
def edit(self, spec, prefix):
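        # select the arch/makefile.include.* template matching the compiler in use and adapt compiler names where needed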
if '%gcc' in spec:
if '+openmp' in spec:
make_include = join_path('arch', 'makefile.include.linux_gnu_omp')
else:
make_include = join_path('arch', 'makefile.include.linux_gnu')
elif '%nvhpc' in spec:
make_include = join_path('arch', 'makefile.include.linux_pgi')
filter_file('-pgc++libs', '-c++libs', make_include, string=True)
filter_file('pgcc', spack_cc, make_include)
filter_file('pgc++', spack_cxx, make_include, string=True)
filter_file('pgfortran', spack_fc, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/include',
spec['qd'].prefix.include, make_include)
filter_file('/opt/pgi/qd-2.3.17/install/lib',
spec['qd'].prefix.lib, make_include)
elif '%aocc' in spec:
if '+openmp' in spec:
copy(
join_path('arch', 'makefile.include.linux_gnu_omp'),
join_path('arch', 'makefile.include.linux_aocc_omp')
)
make_include = join_path('arch', 'makefile.include.linux_aocc_omp')
else:
copy(
join_path('arch', 'makefile.include.linux_gnu'),
join_path('arch', 'makefile.include.linux_aocc')
)
make_include = join_path('arch', 'makefile.include.linux_aocc')
filter_file(
'gcc', '{0} {1}'.format(spack_cc, '-Mfree'),
make_include, string=True
)
filter_file('g++', spack_cxx, make_include, string=True)
filter_file('^CFLAGS_LIB[ ]{0,}=.*$',
'CFLAGS_LIB = -O3', make_include)
filter_file('^FFLAGS_LIB[ ]{0,}=.*$',
'FFLAGS_LIB = -O2', make_include)
filter_file('^OFLAG[ ]{0,}=.*$',
'OFLAG = -O3', make_include)
filter_file('^FC[ ]{0,}=.*$',
'FC = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
filter_file('^FCL[ ]{0,}=.*$',
'FCL = {0}'.format(spec['mpi'].mpifc),
make_include, string=True)
else:
if '+openmp' in spec:
make_include = join_path('arch',
'makefile.include.linux_{0}_omp'.
format(spec.compiler.name))
else:
make_include = join_path('arch',
'makefile.include.linux_' +
spec.compiler.name)
os.rename(make_include, 'makefile.include')
# This bunch of 'filter_file()' is to make these options settable
# as environment variables
filter_file('^CPP_OPTIONS[ ]{0,}=[ ]{0,}',
'CPP_OPTIONS ?= ',
'makefile.include')
filter_file('^FFLAGS[ ]{0,}=[ ]{0,}',
'FFLAGS ?= ',
'makefile.include')
filter_file('^LIBDIR[ ]{0,}=.*$', '', 'makefile.include')
filter_file('^BLAS[ ]{0,}=.*$', 'BLAS ?=', 'makefile.include')
filter_file('^LAPACK[ ]{0,}=.*$', 'LAPACK ?=', 'makefile.include')
filter_file('^FFTW[ ]{0,}?=.*$', 'FFTW ?=', 'makefile.include')
filter_file('^MPI_INC[ ]{0,}=.*$', 'MPI_INC ?=', 'makefile.include')
filter_file('-DscaLAPACK.*$\n', '', 'makefile.include')
filter_file('^SCALAPACK[ ]{0,}=.*$', 'SCALAPACK ?=', 'makefile.include')
if '+cuda' in spec:
filter_file('^OBJECTS_GPU[ ]{0,}=.*$',
'OBJECTS_GPU ?=',
'makefile.include')
filter_file('^CPP_GPU[ ]{0,}=.*$',
'CPP_GPU ?=',
'makefile.include')
filter_file('^CFLAGS[ ]{0,}=.*$',
'CFLAGS ?=',
'makefile.include')
if '+vaspsol' in spec:
copy('VASPsol/src/solvation.F', 'src/')
def setup_build_environment(self, spack_env):
spec = self.spec
cpp_options = ['-DMPI -DMPI_BLOCK=8000',
'-Duse_collective', '-DCACHE_SIZE=4000',
'-Davoidalloc', '-Duse_bse_te',
'-Dtbdyn', '-Duse_shmem']
if '%nvhpc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxPGI\\"', '-DPGI16',
'-Dqd_emulate'])
elif '%aocc' in self.spec:
cpp_options.extend(['-DHOST=\\"LinuxGNU\\"',
'-Dfock_dblbuf'])
if '+openmp' in self.spec:
cpp_options.extend(['-D_OPENMP'])
else:
cpp_options.append('-DHOST=\\"LinuxGNU\\"')
if self.spec.satisfies('@6:'):
cpp_options.append('-Dvasp6')
cflags = ['-fPIC', '-DADD_']
fflags = []
if '%gcc' in spec or '%intel' in spec:
fflags.append('-w')
elif '%nvhpc' in spec:
fflags.extend(['-Mnoupcase', '-Mbackslash', '-Mlarge_arrays'])
elif '%aocc' in spec:
fflags.extend(['-fno-fortran-main', '-Mbackslash', '-ffast-math'])
spack_env.set('BLAS', spec['blas'].libs.ld_flags)
spack_env.set('LAPACK', spec['lapack'].libs.ld_flags)
spack_env.set('FFTW', spec['fftw-api'].prefix)
spack_env.set('MPI_INC', spec['mpi'].prefix.include)
if '%nvhpc' in spec:
spack_env.set('QD', spec['qd'].prefix)
if '+scalapack' in spec:
cpp_options.append('-DscaLAPACK')
spack_env.set('SCALAPACK', spec['scalapack'].libs.ld_flags)
if '+cuda' in spec:
cpp_gpu = ['-DCUDA_GPU', '-DRPROMU_CPROJ_OVERLAP',
'-DCUFFT_MIN=28', '-DUSE_PINNED_MEMORY']
objects_gpu = ['fftmpiw.o', 'fftmpi_map.o', 'fft3dlib.o',
'fftw3d_gpu.o', 'fftmpiw_gpu.o']
cflags.extend(['-DGPUSHMEM=300', '-DHAVE_CUBLAS'])
spack_env.set('CUDA_ROOT', spec['cuda'].prefix)
spack_env.set('CPP_GPU', ' '.join(cpp_gpu))
spack_env.set('OBJECTS_GPU', ' '.join(objects_gpu))
if '+vaspsol' in spec:
cpp_options.append('-Dsol_compat')
if spec.satisfies('%gcc@10:'):
fflags.append('-fallow-argument-mismatch')
# Finally
spack_env.set('CPP_OPTIONS', ' '.join(cpp_options))
spack_env.set('CFLAGS', ' '.join(cflags))
spack_env.set('FFLAGS', ' '.join(fflags))
def build(self, spec, prefix):
if '+cuda' in self.spec:
make('gpu', 'gpu_ncl')
else:
make('std', 'gam', 'ncl')
def install(self, spec, prefix):
install_tree('bin/', prefix.bin)
| LLNL/spack | var/spack/repos/builtin/packages/vasp/package.py | Python | lgpl-2.1 | 9,155 |
#!/usr/bin/env python3
import sys
from testrunner import run
def testfunc(child):
child.expect("All up, running the shell now")
child.sendline("ifconfig")
child.expect(r"Iface\s+(\d+)\s+HWaddr:")
if __name__ == "__main__":
sys.exit(run(testfunc, timeout=1, echo=False))
| cladmi/RIOT | tests/nordic_softdevice/tests/01-run.py | Python | lgpl-2.1 | 291 |
# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for the class
:class:`iris.fileformats.um._fast_load_structured_fields.FieldCollation`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests
from iris._lazy_data import as_lazy_data
from netcdftime import datetime
import numpy as np
from iris.fileformats.um._fast_load_structured_fields import FieldCollation
import iris.fileformats.pp
class Test___init__(tests.IrisTest):
def test_no_fields(self):
with self.assertRaises(AssertionError):
FieldCollation([])
class Test_fields(tests.IrisTest):
def test_preserve_members(self):
fields = ('foo', 'bar', 'wibble')
collation = FieldCollation(fields)
self.assertEqual(collation.fields, fields)
def _make_field(lbyr=None, lbyrd=None, lbft=None,
blev=None, bhlev=None, data=None):
header = [0] * 64
if lbyr is not None:
header[0] = lbyr
header[1] = 1
header[2] = 1
if lbyrd is not None:
header[6] = lbyrd
header[7] = 1
header[8] = 1
if lbft is not None:
header[13] = lbft
if blev is not None:
header[51] = blev
if bhlev is not None:
header[53] = bhlev
field = iris.fileformats.pp.PPField3(header)
if data is not None:
_data = _make_data(data)
field.data = _data
return field
def _make_data(fill_value):
shape = (10, 10)
return as_lazy_data(np.ones(shape)*fill_value)
class Test_data(tests.IrisTest):
# Test order of the data attribute when fastest-varying element is changed.
def test_t1_varies_faster(self):
collation = FieldCollation(
[_make_field(lbyr=2013, lbyrd=2000, data=0),
_make_field(lbyr=2014, lbyrd=2000, data=1),
_make_field(lbyr=2015, lbyrd=2000, data=2),
_make_field(lbyr=2013, lbyrd=2001, data=3),
_make_field(lbyr=2014, lbyrd=2001, data=4),
_make_field(lbyr=2015, lbyrd=2001, data=5)])
result = collation.data[:, :, 0, 0]
expected = [[0, 1, 2], [3, 4, 5]]
self.assertArrayEqual(result, expected)
def test_t2_varies_faster(self):
collation = FieldCollation(
[_make_field(lbyr=2013, lbyrd=2000, data=0),
_make_field(lbyr=2013, lbyrd=2001, data=1),
_make_field(lbyr=2013, lbyrd=2002, data=2),
_make_field(lbyr=2014, lbyrd=2000, data=3),
_make_field(lbyr=2014, lbyrd=2001, data=4),
_make_field(lbyr=2014, lbyrd=2002, data=5)])
result = collation.data[:, :, 0, 0]
expected = [[0, 1, 2], [3, 4, 5]]
self.assertArrayEqual(result, expected)
class Test_element_arrays_and_dims(tests.IrisTest):
def test_single_field(self):
field = _make_field(2013)
collation = FieldCollation([field])
self.assertEqual(collation.element_arrays_and_dims, {})
def test_t1(self):
collation = FieldCollation([_make_field(lbyr=2013),
_make_field(lbyr=2014)])
result = collation.element_arrays_and_dims
self.assertEqual(list(result.keys()), ['t1'])
values, dims = result['t1']
self.assertArrayEqual(values, [datetime(2013, 1, 1),
datetime(2014, 1, 1)])
self.assertEqual(dims, (0,))
def test_t1_and_t2(self):
collation = FieldCollation([_make_field(lbyr=2013, lbyrd=2000),
_make_field(lbyr=2014, lbyrd=2001),
_make_field(lbyr=2015, lbyrd=2002)])
result = collation.element_arrays_and_dims
self.assertEqual(set(result.keys()), set(['t1', 't2']))
values, dims = result['t1']
self.assertArrayEqual(values, [datetime(2013, 1, 1),
datetime(2014, 1, 1),
datetime(2015, 1, 1)])
self.assertEqual(dims, (0,))
values, dims = result['t2']
self.assertArrayEqual(values, [datetime(2000, 1, 1),
datetime(2001, 1, 1),
datetime(2002, 1, 1)])
self.assertEqual(dims, (0,))
def test_t1_and_t2_and_lbft(self):
collation = FieldCollation([_make_field(lbyr=1, lbyrd=15, lbft=6),
_make_field(lbyr=1, lbyrd=16, lbft=9),
_make_field(lbyr=11, lbyrd=25, lbft=6),
_make_field(lbyr=11, lbyrd=26, lbft=9)])
result = collation.element_arrays_and_dims
self.assertEqual(set(result.keys()), set(['t1', 't2', 'lbft']))
values, dims = result['t1']
self.assertArrayEqual(values, [datetime(1, 1, 1),
datetime(11, 1, 1)])
self.assertEqual(dims, (0,))
values, dims = result['t2']
self.assertArrayEqual(values,
[[datetime(15, 1, 1), datetime(16, 1, 1)],
[datetime(25, 1, 1), datetime(26, 1, 1)]])
self.assertEqual(dims, (0, 1))
values, dims = result['lbft']
self.assertArrayEqual(values, [6, 9])
self.assertEqual(dims, (1,))
def test_blev(self):
collation = FieldCollation([_make_field(blev=1), _make_field(blev=2)])
result = collation.element_arrays_and_dims
keys = set(['blev', 'brsvd1', 'brsvd2', 'brlev',
'bhrlev', 'lblev', 'bhlev'])
self.assertEqual(set(result.keys()), keys)
values, dims = result['blev']
self.assertArrayEqual(values, [1, 2])
self.assertEqual(dims, (0,))
def test_bhlev(self):
collation = FieldCollation([_make_field(blev=0, bhlev=1),
_make_field(blev=1, bhlev=2)])
result = collation.element_arrays_and_dims
keys = set(['blev', 'brsvd1', 'brsvd2', 'brlev',
'bhrlev', 'lblev', 'bhlev'])
self.assertEqual(set(result.keys()), keys)
values, dims = result['bhlev']
self.assertArrayEqual(values, [1, 2])
self.assertEqual(dims, (0,))
class Test__time_comparable_int(tests.IrisTest):
def test(self):
# Define a list of date-time tuples, which should remain both all
# distinct and in ascending order when converted...
test_date_tuples = [
# Increment each component in turn to check that all are handled.
(2004, 1, 1, 0, 0, 0),
(2004, 1, 1, 0, 0, 1),
(2004, 1, 1, 0, 1, 0),
(2004, 1, 1, 1, 0, 0),
(2004, 1, 2, 0, 0, 0),
(2004, 2, 1, 0, 0, 0),
# Go across 2004-02-29 leap-day, and on to "Feb 31 .. Mar 1".
(2004, 2, 27, 0, 0, 0),
(2004, 2, 28, 0, 0, 0),
(2004, 2, 29, 0, 0, 0),
(2004, 2, 30, 0, 0, 0),
(2004, 2, 31, 0, 0, 0),
(2004, 3, 1, 0, 0, 0),
(2005, 1, 1, 0, 0, 0)]
collation = FieldCollation(['foo', 'bar'])
test_date_ints = [collation._time_comparable_int(*test_tuple)
for test_tuple in test_date_tuples]
# Check all values are distinct.
        self.assertEqual(len(test_date_ints), len(set(test_date_ints)))
# Check all values are in order.
self.assertEqual(test_date_ints, sorted(test_date_ints))
if __name__ == "__main__":
tests.main()
| LukeC92/iris | lib/iris/tests/unit/fileformats/um/fast_load_structured_fields/test_FieldCollation.py | Python | lgpl-3.0 | 8,389 |
import unittest
from ctypes import *
import _ctypes_test
lib = CDLL(_ctypes_test.__file__)
class StringPtrTestCase(unittest.TestCase):
def test__POINTER_c_char(self):
class X(Structure):
_fields_ = [("str", POINTER(c_char))]
x = X()
# NULL pointer access
self.assertRaises(ValueError, getattr, x.str, "contents")
b = c_buffer(b"Hello, World")
from sys import getrefcount as grc
self.assertEqual(grc(b), 2)
x.str = b
self.assertEqual(grc(b), 3)
# POINTER(c_char) and Python string is NOT compatible
# POINTER(c_char) and c_buffer() is compatible
for i in range(len(b)):
self.assertEqual(b[i], x.str[i])
self.assertRaises(TypeError, setattr, x, "str", "Hello, World")
def test__c_char_p(self):
class X(Structure):
_fields_ = [("str", c_char_p)]
x = X()
# c_char_p and Python string is compatible
# c_char_p and c_buffer is NOT compatible
self.assertEqual(x.str, None)
x.str = b"Hello, World"
self.assertEqual(x.str, b"Hello, World")
b = c_buffer(b"Hello, World")
        self.assertRaises(TypeError, setattr, x, "str", b)
def test_functions(self):
strchr = lib.my_strchr
strchr.restype = c_char_p
# c_char_p and Python string is compatible
# c_char_p and c_buffer are now compatible
strchr.argtypes = c_char_p, c_char
self.assertEqual(strchr(b"abcdef", b"c"), b"cdef")
self.assertEqual(strchr(c_buffer(b"abcdef"), b"c"), b"cdef")
# POINTER(c_char) and Python string is NOT compatible
# POINTER(c_char) and c_buffer() is compatible
strchr.argtypes = POINTER(c_char), c_char
buf = c_buffer(b"abcdef")
self.assertEqual(strchr(buf, b"c"), b"cdef")
self.assertEqual(strchr(b"abcdef", b"c"), b"cdef")
# XXX These calls are dangerous, because the first argument
# to strchr is no longer valid after the function returns!
# So we must keep a reference to buf separately
strchr.restype = POINTER(c_char)
buf = c_buffer(b"abcdef")
r = strchr(buf, b"c")
x = r[0], r[1], r[2], r[3], r[4]
self.assertEqual(x, (b"c", b"d", b"e", b"f", b"\000"))
del buf
# x1 will NOT be the same as x, usually:
x1 = r[0], r[1], r[2], r[3], r[4]
if __name__ == '__main__':
unittest.main()
| theheros/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_stringptr.py | Python | lgpl-3.0 | 2,559 |
"""Windows-specific implementation of process utilities.
This file is only meant to be imported by process.py, not by end-users.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# stdlib
import os
import sys
import ctypes
import msvcrt
from ctypes import c_int, POINTER
from ctypes.wintypes import LPCWSTR, HLOCAL
from subprocess import STDOUT
# our own imports
from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split
from . import py3compat
from . import text
from .encoding import DEFAULT_ENCODING
#-----------------------------------------------------------------------------
# Function definitions
#-----------------------------------------------------------------------------
class AvoidUNCPath(object):
"""A context manager to protect command execution from UNC paths.
In the Win32 API, commands can't be invoked with the cwd being a UNC path.
This context manager temporarily changes directory to the 'C:' drive on
entering, and restores the original working directory on exit.
The context manager returns the starting working directory *if* it made a
change and None otherwise, so that users can apply the necessary adjustment
to their system calls in the event of a change.
Example
-------
::
cmd = 'dir'
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
os.system(cmd)
"""
def __enter__(self):
self.path = os.getcwdu()
self.is_unc_path = self.path.startswith(r"\\")
if self.is_unc_path:
# change to c drive (as cmd.exe cannot handle UNC addresses)
os.chdir("C:")
return self.path
else:
# We return None to signal that there was no change in the working
# directory
return None
def __exit__(self, exc_type, exc_value, traceback):
if self.is_unc_path:
os.chdir(self.path)
def _find_cmd(cmd):
"""Find the full path to a .bat or .exe using the win32api module."""
try:
from win32api import SearchPath
except ImportError:
raise ImportError('you need to have pywin32 installed for this to work')
else:
PATH = os.environ['PATH']
extensions = ['.exe', '.com', '.bat', '.py']
path = None
for ext in extensions:
try:
path = SearchPath(PATH, cmd + ext)[0]
except:
pass
if path is None:
raise OSError("command %r not found" % cmd)
else:
return path
def _system_body(p):
"""Callback for _system."""
enc = DEFAULT_ENCODING
for line in read_no_interrupt(p.stdout).splitlines():
line = line.decode(enc, 'replace')
print(line, file=sys.stdout)
for line in read_no_interrupt(p.stderr).splitlines():
line = line.decode(enc, 'replace')
print(line, file=sys.stderr)
# Wait to finish for returncode
return p.wait()
def system(cmd):
"""Win32 version of os.system() that works with network shares.
Note that this implementation returns None, as meant for use in IPython.
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
None : we explicitly do NOT return the subprocess status code, as this
utility is meant to be used extensively in IPython, where any return value
would trigger :func:`sys.displayhook` calls.
"""
# The controller provides interactivity with both
# stdin and stdout
#import _process_win32_controller
#_process_win32_controller.system(cmd)
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
return process_handler(cmd, _system_body)
def getoutput(cmd):
"""Return standard output of executing cmd in a shell.
Accepts the same arguments as os.system().
Parameters
----------
cmd : str
A command to be executed in the system shell.
Returns
-------
stdout : str
"""
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT)
if out is None:
out = b''
return py3compat.bytes_to_str(out)
try:
CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
    CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(c_int)]
CommandLineToArgvW.restype = POINTER(LPCWSTR)
LocalFree = ctypes.windll.kernel32.LocalFree
    LocalFree.restype = HLOCAL
    LocalFree.argtypes = [HLOCAL]
def arg_split(commandline, posix=False, strict=True):
"""Split a command line's arguments in a shell-like manner.
        This is a special version for Windows that uses a ctypes call to CommandLineToArgvW
        to do the argv splitting. The posix parameter is ignored.
If strict=False, process_common.arg_split(...strict=False) is used instead.
"""
#CommandLineToArgvW returns path to executable if called with empty string.
if commandline.strip() == "":
return []
if not strict:
# not really a cl-arg, fallback on _process_common
return py_arg_split(commandline, posix=posix, strict=strict)
argvn = c_int()
result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn))
result_array_type = LPCWSTR * argvn.value
result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))]
retval = LocalFree(result_pointer)
return result
except AttributeError:
arg_split = py_arg_split
| cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/utils/_process_win32.py | Python | lgpl-3.0 | 6,316 |
# Copyright 2014 Juniper Networks. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Hampapur Ajay, Praneet Bachheti, Rudra Rugge, Atul Moghe
from oslo.config import cfg
import requests
from neutron.api.v2 import attributes as attr
from neutron.common import exceptions as exc
from neutron.db import portbindings_base
from neutron.db import quota_db # noqa
from neutron.extensions import external_net
from neutron.extensions import portbindings
from neutron.extensions import securitygroup
from neutron import neutron_plugin_base_v2
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils as json
from neutron.openstack.common import log as logging
from simplejson import JSONDecodeError
LOG = logging.getLogger(__name__)
vnc_opts = [
cfg.StrOpt('api_server_ip', default='127.0.0.1',
help='IP address to connect to VNC controller'),
cfg.StrOpt('api_server_port', default='8082',
help='Port to connect to VNC controller'),
cfg.DictOpt('contrail_extensions', default={},
                help='Enable Contrail extensions (policy, ipam)'),
]
# ContrailError messages have already been translated,
# so there is no need to use i18n here.
class ContrailNotFoundError(exc.NotFound):
message = '%(msg)s'
class ContrailConflictError(exc.Conflict):
message = '%(msg)s'
class ContrailBadRequestError(exc.BadRequest):
message = '%(msg)s'
class ContrailServiceUnavailableError(exc.ServiceUnavailable):
message = '%(msg)s'
class ContrailNotAuthorizedError(exc.NotAuthorized):
message = '%(msg)s'
class InvalidContrailExtensionError(exc.ServiceUnavailable):
message = _("Invalid Contrail Extension: %(ext_name) %(ext_class)")
CONTRAIL_EXCEPTION_MAP = {
requests.codes.not_found: ContrailNotFoundError,
requests.codes.conflict: ContrailConflictError,
requests.codes.bad_request: ContrailBadRequestError,
requests.codes.service_unavailable: ContrailServiceUnavailableError,
requests.codes.unauthorized: ContrailNotAuthorizedError,
requests.codes.request_timeout: ContrailServiceUnavailableError,
}
class NeutronPluginContrailCoreV2(neutron_plugin_base_v2.NeutronPluginBaseV2,
securitygroup.SecurityGroupPluginBase,
portbindings_base.PortBindingBaseMixin,
external_net.External_net):
supported_extension_aliases = ["security-group", "router",
"port-security", "binding", "agent",
"quotas", "external-net",
"allowed-address-pairs", "extra_dhcp_opt"]
PLUGIN_URL_PREFIX = '/neutron'
__native_bulk_support = False
# patch VIF_TYPES
portbindings.__dict__['VIF_TYPE_VROUTER'] = 'vrouter'
portbindings.VIF_TYPES.append(portbindings.VIF_TYPE_VROUTER)
def _parse_class_args(self):
"""Parse the contrailplugin.ini file.
        Opencontrail supports extensions such as ipam and policy; these extensions
can be configured in the plugin configuration file as shown below.
Plugin then loads the specified extensions.
contrail_extensions=ipam:<classpath>,policy:<classpath>
"""
contrail_extensions = cfg.CONF.APISERVER.contrail_extensions
# If multiple class specified for same extension, last one will win
# according to DictOpt behavior
for ext_name, ext_class in contrail_extensions.items():
try:
if not ext_class:
LOG.error(_('Malformed contrail extension...'))
continue
self.supported_extension_aliases.append(ext_name)
ext_class = importutils.import_class(ext_class)
ext_instance = ext_class()
ext_instance.set_core(self)
for method in dir(ext_instance):
for prefix in ['get', 'update', 'delete', 'create']:
if method.startswith('%s_' % prefix):
setattr(self, method,
ext_instance.__getattribute__(method))
except Exception:
LOG.exception(_("Contrail Backend Error"))
# Converting contrail backend error to Neutron Exception
raise InvalidContrailExtensionError(
ext_name=ext_name, ext_class=ext_class)
#keystone
self._authn_token = None
if cfg.CONF.auth_strategy == 'keystone':
kcfg = cfg.CONF.keystone_authtoken
body = '{"auth":{"passwordCredentials":{'
body += ' "username": "%s",' % (kcfg.admin_user)
body += ' "password": "%s"},' % (kcfg.admin_password)
body += ' "tenantName":"%s"}}' % (kcfg.admin_tenant_name)
self._authn_body = body
self._authn_token = cfg.CONF.keystone_authtoken.admin_token
self._keystone_url = "%s://%s:%s%s" % (
cfg.CONF.keystone_authtoken.auth_protocol,
cfg.CONF.keystone_authtoken.auth_host,
cfg.CONF.keystone_authtoken.auth_port,
"/v2.0/tokens")
def __init__(self):
super(NeutronPluginContrailCoreV2, self).__init__()
portbindings_base.register_port_dict_function()
cfg.CONF.register_opts(vnc_opts, 'APISERVER')
self._parse_class_args()
def _get_base_binding_dict(self):
binding = {
portbindings.VIF_TYPE: portbindings.VIF_TYPE_VROUTER,
portbindings.VIF_DETAILS: {
# TODO(praneetb): Replace with new VIF security details
portbindings.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases
}
}
return binding
def get_agents(self, context, filters=None, fields=None):
# This method is implemented so that horizon is happy
return []
def _request_api_server(self, url, data=None, headers=None):
# Attempt to post to Api-Server
response = requests.post(url, data=data, headers=headers)
if (response.status_code == requests.codes.unauthorized):
# Get token from keystone and save it for next request
response = requests.post(self._keystone_url,
data=self._authn_body,
headers={'Content-type': 'application/json'})
if (response.status_code == requests.codes.ok):
# plan is to re-issue original request with new token
auth_headers = headers or {}
authn_content = json.loads(response.text)
self._authn_token = authn_content['access']['token']['id']
auth_headers['X-AUTH-TOKEN'] = self._authn_token
response = self._request_api_server(url, data, auth_headers)
else:
raise RuntimeError('Authentication Failure')
return response
def _request_api_server_authn(self, url, data=None, headers=None):
authn_headers = headers or {}
if self._authn_token is not None:
authn_headers['X-AUTH-TOKEN'] = self._authn_token
response = self._request_api_server(url, data, headers=authn_headers)
return response
def _relay_request(self, url_path, data=None):
"""Send received request to api server."""
url = "http://%s:%s%s" % (cfg.CONF.APISERVER.api_server_ip,
cfg.CONF.APISERVER.api_server_port,
url_path)
return self._request_api_server_authn(
url, data=data, headers={'Content-type': 'application/json'})
def _request_backend(self, context, data_dict, obj_name, action):
context_dict = self._encode_context(context, action, obj_name)
data = json.dumps({'context': context_dict, 'data': data_dict})
url_path = "%s/%s" % (self.PLUGIN_URL_PREFIX, obj_name)
response = self._relay_request(url_path, data=data)
try:
return response.status_code, response.json()
except JSONDecodeError:
return response.status_code, response.content
def _encode_context(self, context, operation, apitype):
cdict = {'user_id': getattr(context, 'user_id', ''),
'is_admin': getattr(context, 'is_admin', False),
'operation': operation,
'type': apitype,
'tenant_id': getattr(context, 'tenant_id', None)}
if context.roles:
cdict['roles'] = context.roles
if context.tenant:
cdict['tenant'] = context.tenant
return cdict
def _encode_resource(self, resource_id=None, resource=None, fields=None,
filters=None):
resource_dict = {}
if resource_id:
resource_dict['id'] = resource_id
if resource:
resource_dict['resource'] = resource
resource_dict['filters'] = filters
resource_dict['fields'] = fields
return resource_dict
def _prune(self, resource_dict, fields):
if fields:
return dict(((key, item) for key, item in resource_dict.items()
if key in fields))
return resource_dict
def _transform_response(self, status_code, info=None, obj_name=None,
fields=None):
if status_code == requests.codes.ok:
if not isinstance(info, list):
return self._prune(info, fields)
else:
return [self._prune(items, fields) for items in info]
self._raise_contrail_error(status_code, info, obj_name)
def _raise_contrail_error(self, status_code, info, obj_name):
if status_code == requests.codes.bad_request:
raise ContrailBadRequestError(
msg=info['message'], resource=obj_name)
error_class = CONTRAIL_EXCEPTION_MAP[status_code]
raise error_class(msg=info['message'])
def _create_resource(self, res_type, context, res_data):
"""Create a resource in API server.
This method encodes neutron model, and sends it to the
contrail api server.
"""
for key, value in res_data[res_type].items():
if value == attr.ATTR_NOT_SPECIFIED:
del res_data[res_type][key]
res_dict = self._encode_resource(resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'CREATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("create_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _get_resource(self, res_type, context, id, fields):
"""Get a resource from API server.
This method gets a resource from the contrail api server
"""
res_dict = self._encode_resource(resource_id=id, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READ')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug("get_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _update_resource(self, res_type, context, id, res_data):
"""Update a resource in API server.
This method updates a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=id,
resource=res_data[res_type])
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'UPDATE')
res_dicts = self._transform_response(status_code, info=res_info,
obj_name=res_type)
LOG.debug("update_%(res_type)s(): %(res_dicts)s",
{'res_type': res_type, 'res_dicts': res_dicts})
return res_dicts
def _delete_resource(self, res_type, context, id):
"""Delete a resource in API server
This method deletes a resource in the contrail api server
"""
res_dict = self._encode_resource(resource_id=id)
LOG.debug("delete_%(res_type)s(): %(id)s",
{'res_type': res_type, 'id': id})
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'DELETE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name=res_type)
def _list_resource(self, res_type, context, filters, fields):
res_dict = self._encode_resource(filters=filters, fields=fields)
status_code, res_info = self._request_backend(context, res_dict,
res_type, 'READALL')
res_dicts = self._transform_response(status_code, info=res_info,
fields=fields, obj_name=res_type)
LOG.debug(
"get_%(res_type)s(): filters: %(filters)r data: %(res_dicts)r",
{'res_type': res_type, 'filters': filters,
'res_dicts': res_dicts})
return res_dicts
def _count_resource(self, res_type, context, filters):
res_dict = self._encode_resource(filters=filters)
status_code, res_count = self._request_backend(context, res_dict,
res_type, 'READCOUNT')
LOG.debug("get_%(res_type)s_count(): %(res_count)r",
{'res_type': res_type, 'res_count': res_count})
return res_count
def _get_network(self, context, id, fields=None):
return self._get_resource('network', context, id, fields)
def create_network(self, context, network):
"""Creates a new Virtual Network."""
return self._create_resource('network', context, network)
def get_network(self, context, network_id, fields=None):
"""Get the attributes of a particular Virtual Network."""
return self._get_network(context, network_id, fields)
def update_network(self, context, network_id, network):
"""Updates the attributes of a particular Virtual Network."""
return self._update_resource('network', context, network_id,
network)
def delete_network(self, context, network_id):
"""Creates a new Virtual Network.
Deletes the network with the specified network identifier
belonging to the specified tenant.
"""
self._delete_resource('network', context, network_id)
def get_networks(self, context, filters=None, fields=None):
"""Get the list of Virtual Networks."""
return self._list_resource('network', context, filters,
fields)
def get_networks_count(self, context, filters=None):
"""Get the count of Virtual Network."""
networks_count = self._count_resource('network', context, filters)
return networks_count['count']
def create_subnet(self, context, subnet):
"""Creates a new subnet, and assigns it a symbolic name."""
if subnet['subnet']['gateway_ip'] is None:
subnet['subnet']['gateway_ip'] = '0.0.0.0'
if subnet['subnet']['host_routes'] != attr.ATTR_NOT_SPECIFIED:
if (len(subnet['subnet']['host_routes']) >
cfg.CONF.max_subnet_host_routes):
raise exc.HostRoutesExhausted(subnet_id=subnet[
'subnet'].get('id', _('new subnet')),
quota=cfg.CONF.max_subnet_host_routes)
subnet_created = self._create_resource('subnet', context, subnet)
return self._make_subnet_dict(subnet_created)
def _make_subnet_dict(self, subnet):
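        # create_subnet stores a missing gateway as '0.0.0.0'; translate that
        # sentinel back to None before handing the subnet to the caller.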
if 'gateway_ip' in subnet and subnet['gateway_ip'] == '0.0.0.0':
subnet['gateway_ip'] = None
return subnet
def _get_subnet(self, context, subnet_id, fields=None):
subnet = self._get_resource('subnet', context, subnet_id, fields)
return self._make_subnet_dict(subnet)
def get_subnet(self, context, subnet_id, fields=None):
"""Get the attributes of a particular subnet."""
return self._get_subnet(context, subnet_id, fields)
def update_subnet(self, context, subnet_id, subnet):
"""Updates the attributes of a particular subnet."""
subnet = self._update_resource('subnet', context, subnet_id, subnet)
return self._make_subnet_dict(subnet)
def delete_subnet(self, context, subnet_id):
"""
Deletes the subnet with the specified subnet identifier
belonging to the specified tenant.
"""
self._delete_resource('subnet', context, subnet_id)
def get_subnets(self, context, filters=None, fields=None):
"""Get the list of subnets."""
return [self._make_subnet_dict(s)
for s in self._list_resource(
'subnet', context, filters, fields)]
def get_subnets_count(self, context, filters=None):
"""Get the count of subnets."""
subnets_count = self._count_resource('subnet', context, filters)
return subnets_count['count']
def _extend_port_dict_security_group(self, port_res, port_db):
# Security group bindings will be retrieved from the sqlalchemy
# model. As they're loaded eagerly with ports because of the
# joined load they will not cause an extra query.
port_res[securitygroup.SECURITYGROUPS] = port_db.get(
'security_groups', []) or []
return port_res
def _make_port_dict(self, port):
return port
def _get_port(self, context, id, fields=None):
port = self._get_resource('port', context, id, fields)
return self._make_port_dict(port)
def _update_ips_for_port(self, context, network_id, port_id, original_ips,
new_ips):
"""Add or remove IPs from the port."""
# These ips are still on the port and haven't been removed
prev_ips = []
# the new_ips contain all of the fixed_ips that are to be updated
if len(new_ips) > cfg.CONF.max_fixed_ips_per_port:
            msg = _('Exceeded maximum amount of fixed ips per port')
raise exc.InvalidInput(error_message=msg)
# Remove all of the intersecting elements
for original_ip in original_ips[:]:
for new_ip in new_ips[:]:
if ('ip_address' in new_ip and
original_ip['ip_address'] == new_ip['ip_address']):
original_ips.remove(original_ip)
new_ips.remove(new_ip)
prev_ips.append(original_ip)
return new_ips, prev_ips
def create_port(self, context, port):
"""Creates a port on the specified Virtual Network."""
port = self._create_resource('port', context, port)
return self._make_port_dict(port)
def get_port(self, context, port_id, fields=None):
"""Get the attributes of a particular port."""
return self._get_port(context, port_id, fields)
def update_port(self, context, port_id, port):
"""Updates a port.
Updates the attributes of a port on the specified Virtual
Network.
"""
if 'fixed_ips' in port['port']:
original = self._get_port(context, port_id)
added_ips, prev_ips = self._update_ips_for_port(
context, original['network_id'], port_id,
original['fixed_ips'], port['port']['fixed_ips'])
port['port']['fixed_ips'] = prev_ips + added_ips
port = self._update_resource('port', context, port_id, port)
return self._make_port_dict(port)
def delete_port(self, context, port_id):
"""Deletes a port.
Deletes a port on a specified Virtual Network,
if the port contains a remote interface attachment,
the remote interface is first un-plugged and then the port
is deleted.
"""
self._delete_resource('port', context, port_id)
def get_ports(self, context, filters=None, fields=None):
"""Get all ports.
Retrieves all port identifiers belonging to the
        specified Virtual Network with the specified filter.
"""
return [self._make_port_dict(p)
for p in self._list_resource('port', context, filters, fields)]
def get_ports_count(self, context, filters=None):
"""Get the count of ports."""
ports_count = self._count_resource('port', context, filters)
return ports_count['count']
# Router API handlers
def create_router(self, context, router):
"""Creates a router.
Creates a new Logical Router, and assigns it
a symbolic name.
"""
return self._create_resource('router', context, router)
def get_router(self, context, router_id, fields=None):
"""Get the attributes of a router."""
return self._get_resource('router', context, router_id, fields)
def update_router(self, context, router_id, router):
"""Updates the attributes of a router."""
return self._update_resource('router', context, router_id,
router)
def delete_router(self, context, router_id):
"""Deletes a router."""
self._delete_resource('router', context, router_id)
def get_routers(self, context, filters=None, fields=None):
"""Retrieves all router identifiers."""
return self._list_resource('router', context, filters, fields)
def get_routers_count(self, context, filters=None):
"""Get the count of routers."""
routers_count = self._count_resource('router', context, filters)
return routers_count['count']
def add_router_interface(self, context, router_id, interface_info):
"""Add interface to a router."""
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exc.BadRequest(resource='router', msg=msg)
if 'port_id' in interface_info:
if 'subnet_id' in interface_info:
msg = _("Cannot specify both subnet-id and port-id")
raise exc.BadRequest(resource='router', msg=msg)
res_dict = self._encode_resource(resource_id=router_id,
resource=interface_info)
status_code, res_info = self._request_backend(context, res_dict,
'router', 'ADDINTERFACE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name='add_router_interface')
return res_info
def remove_router_interface(self, context, router_id, interface_info):
"""Delete interface from a router."""
if not interface_info:
msg = _("Either subnet_id or port_id must be specified")
raise exc.BadRequest(resource='router', msg=msg)
res_dict = self._encode_resource(resource_id=router_id,
resource=interface_info)
status_code, res_info = self._request_backend(context, res_dict,
'router', 'DELINTERFACE')
if status_code != requests.codes.ok:
self._raise_contrail_error(status_code, info=res_info,
obj_name='remove_router_interface')
return res_info
# Floating IP API handlers
def create_floatingip(self, context, floatingip):
"""Creates a floating IP."""
return self._create_resource('floatingip', context, floatingip)
def update_floatingip(self, context, fip_id, floatingip):
"""Updates the attributes of a floating IP."""
return self._update_resource('floatingip', context, fip_id,
floatingip)
def get_floatingip(self, context, fip_id, fields=None):
"""Get the attributes of a floating ip."""
return self._get_resource('floatingip', context, fip_id, fields)
def delete_floatingip(self, context, fip_id):
"""Deletes a floating IP."""
self._delete_resource('floatingip', context, fip_id)
def get_floatingips(self, context, filters=None, fields=None):
"""Retrieves all floating ips identifiers."""
return self._list_resource('floatingip', context, filters, fields)
def get_floatingips_count(self, context, filters=None):
"""Get the count of floating IPs."""
fips_count = self._count_resource('floatingip', context, filters)
return fips_count['count']
# Security Group handlers
def create_security_group(self, context, security_group):
"""Creates a Security Group."""
return self._create_resource('security_group', context,
security_group)
def get_security_group(self, context, sg_id, fields=None, tenant_id=None):
"""Get the attributes of a security group."""
return self._get_resource('security_group', context, sg_id, fields)
def update_security_group(self, context, sg_id, security_group):
"""Updates the attributes of a security group."""
return self._update_resource('security_group', context, sg_id,
security_group)
def delete_security_group(self, context, sg_id):
"""Deletes a security group."""
self._delete_resource('security_group', context, sg_id)
def get_security_groups(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Retrieves all security group identifiers."""
return self._list_resource('security_group', context,
filters, fields)
def get_security_groups_count(self, context, filters=None):
return 0
def get_security_group_rules_count(self, context, filters=None):
return 0
def create_security_group_rule(self, context, security_group_rule):
"""Creates a security group rule."""
return self._create_resource('security_group_rule', context,
security_group_rule)
def delete_security_group_rule(self, context, sg_rule_id):
"""Deletes a security group rule."""
self._delete_resource('security_group_rule', context, sg_rule_id)
def get_security_group_rule(self, context, sg_rule_id, fields=None):
"""Get the attributes of a security group rule."""
return self._get_resource('security_group_rule', context,
sg_rule_id, fields)
def get_security_group_rules(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Retrieves all security group rules."""
return self._list_resource('security_group_rule', context,
filters, fields)
| cloudwatt/contrail-neutron-plugin | neutron_plugin_contrail/plugins/opencontrail/contrail_plugin.py | Python | apache-2.0 | 28,349 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.gather."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
_TEST_TYPES = (dtypes.int64, dtypes.float32,
dtypes.complex64, dtypes.complex128)
class GatherTest(test.TestCase):
def _buildParams(self, data, dtype):
data = data.astype(dtype.as_numpy_dtype)
# For complex types, add an index-dependent imaginary component so we can
# tell we got the right value.
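    # Illustrative: real params [1., 2.] become [1+10j, 2+20j] below.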
if dtype.is_complex:
return data + 10j * data
return data
def testScalar1D(self):
with self.cached_session(use_gpu=True):
data = np.array([0, 1, 2, 3, 7, 5])
for dtype in _TEST_TYPES:
for indices in 4, [1, 2, 2, 4, 5]:
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = gather_t.eval()
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
def testScalar2D(self):
with self.session(use_gpu=True):
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = gather_t.eval()
self.assertAllEqual(np.take(params_np, 2, axis=axis), gather_val)
expected_shape = data.shape[:axis] + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
def testSimpleTwoD32(self):
with self.session(use_gpu=True):
data = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8],
[9, 10, 11], [12, 13, 14]])
for dtype in _TEST_TYPES:
for axis in range(data.ndim):
params_np = self._buildParams(data, dtype)
params = constant_op.constant(params_np)
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = gather_t.eval()
self.assertAllEqual(np.take(params_np, [0, 1, 0, 2], axis=axis),
gather_val)
expected_shape = data.shape[:axis] + (4,) + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
def testHigherRank(self):
# We check that scalar and empty indices shapes work as well
shape = (2, 1, 3, 2)
for indices_shape in (), (0,), (2, 0), (2, 3):
for dtype in _TEST_TYPES:
for axis in range(len(shape)):
params = self._buildParams(np.random.randn(*shape), dtype)
indices = np.random.randint(shape[axis], size=indices_shape)
with self.cached_session(use_gpu=True) as sess:
tf_params = constant_op.constant(params)
tf_indices = constant_op.constant(indices)
# Check that both positive and negative indices for axis work.
tf_axis = constant_op.constant(axis)
tf_negative_axis = constant_op.constant(-len(shape) + axis)
gather = array_ops.gather(tf_params, tf_indices, axis=tf_axis)
gather_negative_axis = array_ops.gather(
tf_params, tf_indices, axis=tf_negative_axis)
gather_value, gather_negative_axis_value = sess.run(
[gather, gather_negative_axis])
gather_np = np.take(params, indices, axis)
self.assertAllEqual(gather_np, gather_value)
self.assertAllEqual(gather_np, gather_negative_axis_value)
expected_shape = (params.shape[:axis] + indices.shape +
params.shape[axis + 1:])
self.assertEqual(expected_shape, gather.shape)
self.assertEqual(expected_shape, gather_negative_axis.shape)
# Test gradients
gather_grad = np.random.randn(
*gather.get_shape().as_list()).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
gather_grad -= 1j * gather_grad
params_grad, indices_grad, axis_grad = gradients_impl.gradients(
gather, [tf_params, tf_indices, tf_axis], gather_grad)
self.assertEqual(indices_grad, None)
self.assertEqual(axis_grad, None)
if dtype.is_integer:
self.assertEqual(params_grad, None)
continue
# For axis 0, we are able to create an efficient IndexedSlices for
# the gradient.
if axis == 0:
self.assertEqual(type(params_grad), ops.IndexedSlices)
params_grad = ops.convert_to_tensor(params_grad)
correct_params_grad = np.zeros(shape).astype(dtype.as_numpy_dtype)
outer_dims = axis
inner_dims = len(shape) - axis - 1
gather_grad = gather_grad.reshape(
shape[:axis] + (indices.size,) + shape[axis + 1:])
for source_index, dest_index in enumerate(indices.flat):
dest_slice = ((slice(None),) * outer_dims + (dest_index,) +
(slice(None),) * inner_dims)
source_slice = ((slice(None),) * outer_dims + (source_index,) +
(slice(None),) * inner_dims)
correct_params_grad[dest_slice] += gather_grad[source_slice]
self.assertAllClose(correct_params_grad, params_grad.eval(),
atol=2e-6, rtol=2e-6)
def testString(self):
params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])
with self.cached_session():
self.assertAllEqual([b"qwer", b"uiop"],
array_ops.gather(params, 1, axis=0).eval())
self.assertAllEqual([b"asdf", b"qwer"],
array_ops.gather(params, 0, axis=1).eval())
def testUInt32AndUInt64(self):
for unsigned_type in (dtypes.uint32, dtypes.uint64):
params = self._buildParams(
np.array([[1, 2, 3], [7, 8, 9]]), unsigned_type)
with self.cached_session():
self.assertAllEqual([7, 8, 9],
array_ops.gather(params, 1, axis=0).eval())
self.assertAllEqual([1, 7], array_ops.gather(params, 0, axis=1).eval())
def testUnknownIndices(self):
params = constant_op.constant([[0, 1, 2]])
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices)
self.assertEqual(None, gather_t.get_shape())
def testUnknownAxis(self):
params = constant_op.constant([[0, 1, 2]])
indices = constant_op.constant([[0, 0], [0, 0]])
axis = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
# Rank 2 params with rank 2 indices results in a rank 3 shape.
self.assertEqual([None, None, None], gather_t.shape.as_list())
# If indices is also unknown the result rank is unknown.
indices = array_ops.placeholder(dtypes.int32)
gather_t = array_ops.gather(params, indices, axis=axis)
self.assertEqual(None, gather_t.shape)
def testBadIndicesCPU(self):
with self.session(use_gpu=False):
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
array_ops.gather(params, [[7]], axis=0).eval()
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
array_ops.gather(params, [[7]], axis=1).eval()
def _disabledTestBadIndicesGPU(self):
# TODO disabled due to different behavior on GPU and CPU
# On GPU the bad indices do not raise error but fetch 0 values
if not test.is_gpu_available():
return
with self.session(use_gpu=True):
params = [[0, 1, 2], [3, 4, 5]]
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 2\)"):
array_ops.gather(params, [[7]], axis=0).eval()
with self.assertRaisesOpError(r"indices\[0,0\] = 7 is not in \[0, 3\)"):
array_ops.gather(params, [[7]], axis=1).eval()
def testBadAxis(self):
with self.session(use_gpu=True):
params = [0, 1, 2]
params_ph = array_ops.placeholder(dtypes.int32)
indices = 0
for bad_axis in (1, 2, -2):
# Shape inference can validate axis for known params rank.
with self.assertRaisesWithPredicateMatch(
ValueError, "Shape must be at least rank . but is rank 1"):
array_ops.gather(params, indices, axis=bad_axis)
# If params rank is unknown, an op error occurs.
with self.assertRaisesOpError(
r"Expected axis in the range \[-1, 1\), but got %s" % bad_axis):
array_ops.gather(params_ph, indices, axis=bad_axis).eval(
feed_dict={params_ph: params})
def testEmptySlices(self):
with self.session(use_gpu=True):
for dtype in _TEST_TYPES:
for itype in np.int32, np.int64:
# Leading axis gather.
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
indices = np.array([3, 4], dtype=itype)
gather = array_ops.gather(params, indices, axis=0)
self.assertAllEqual(gather.eval(), np.zeros((2, 0, 0)))
# Middle axis gather.
params = np.zeros((0, 7, 0), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=1)
self.assertAllEqual(gather.eval(), np.zeros((0, 2, 0)))
# Trailing axis gather.
params = np.zeros((0, 0, 7), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=2)
self.assertAllEqual(gather.eval(), np.zeros((0, 0, 2)))
if __name__ == "__main__":
test.main()
| seanli9jan/tensorflow | tensorflow/python/kernel_tests/gather_op_test.py | Python | apache-2.0 | 10,957 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from enum import Enum
from airflow.utils.weekday import WeekDay
class TestWeekDay(unittest.TestCase):
def test_weekday_enum_length(self):
assert len(WeekDay) == 7
def test_weekday_name_value(self):
weekdays = "MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY SUNDAY"
weekdays = weekdays.split()
for i, weekday in enumerate(weekdays, start=1):
weekday_enum = WeekDay(i)
assert weekday_enum == i
assert int(weekday_enum) == i
assert weekday_enum.name == weekday
assert weekday_enum in WeekDay
assert 0 < weekday_enum < 8
assert isinstance(weekday_enum, WeekDay)
assert isinstance(weekday_enum, int)
assert isinstance(weekday_enum, Enum)
| nathanielvarona/airflow | tests/utils/test_weekday.py | Python | apache-2.0 | 1,591 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest.common.utils.data_utils import rand_name
import tempest.test
class BaseIdentityAdminTest(tempest.test.BaseTestCase):
@classmethod
def setUpClass(cls):
super(BaseIdentityAdminTest, cls).setUpClass()
os = clients.AdminManager(interface=cls._interface)
cls.client = os.identity_client
cls.token_client = os.token_client
cls.endpoints_client = os.endpoints_client
cls.v3_client = os.identity_v3_client
cls.service_client = os.service_client
cls.policy_client = os.policy_client
cls.v3_token = os.token_v3_client
cls.creds_client = os.credentials_client
if not cls.client.has_admin_extensions():
raise cls.skipException("Admin extensions disabled")
cls.data = DataGenerator(cls.client)
cls.v3data = DataGenerator(cls.v3_client)
os = clients.Manager(interface=cls._interface)
cls.non_admin_client = os.identity_client
cls.v3_non_admin_client = os.identity_v3_client
@classmethod
def tearDownClass(cls):
cls.data.teardown_all()
cls.v3data.teardown_all()
super(BaseIdentityAdminTest, cls).tearDownClass()
def disable_user(self, user_name):
user = self.get_user_by_name(user_name)
self.client.enable_disable_user(user['id'], False)
def disable_tenant(self, tenant_name):
tenant = self.get_tenant_by_name(tenant_name)
self.client.update_tenant(tenant['id'], enabled=False)
def get_user_by_name(self, name):
_, users = self.client.get_users()
user = [u for u in users if u['name'] == name]
if len(user) > 0:
return user[0]
def get_tenant_by_name(self, name):
_, tenants = self.client.list_tenants()
tenant = [t for t in tenants if t['name'] == name]
if len(tenant) > 0:
return tenant[0]
def get_role_by_name(self, name):
_, roles = self.client.list_roles()
role = [r for r in roles if r['name'] == name]
if len(role) > 0:
return role[0]
class DataGenerator(object):
def __init__(self, client):
self.client = client
self.users = []
self.tenants = []
self.roles = []
self.role_name = None
self.v3_users = []
self.projects = []
self.v3_roles = []
def setup_test_user(self):
"""Set up a test user."""
self.setup_test_tenant()
self.test_user = rand_name('test_user_')
self.test_password = rand_name('pass_')
self.test_email = self.test_user + '@testmail.tm'
resp, self.user = self.client.create_user(self.test_user,
self.test_password,
self.tenant['id'],
self.test_email)
self.users.append(self.user)
def setup_test_tenant(self):
"""Set up a test tenant."""
self.test_tenant = rand_name('test_tenant_')
self.test_description = rand_name('desc_')
resp, self.tenant = self.client.create_tenant(
name=self.test_tenant,
description=self.test_description)
self.tenants.append(self.tenant)
def setup_test_role(self):
"""Set up a test role."""
self.test_role = rand_name('role')
resp, self.role = self.client.create_role(self.test_role)
self.roles.append(self.role)
def setup_test_v3_user(self):
"""Set up a test v3 user."""
self.setup_test_project()
self.test_user = rand_name('test_user_')
self.test_password = rand_name('pass_')
self.test_email = self.test_user + '@testmail.tm'
resp, self.v3_user = self.client.create_user(self.test_user,
self.test_password,
self.project['id'],
self.test_email)
self.v3_users.append(self.v3_user)
def setup_test_project(self):
"""Set up a test project."""
self.test_project = rand_name('test_project_')
self.test_description = rand_name('desc_')
resp, self.project = self.client.create_project(
name=self.test_project,
description=self.test_description)
self.projects.append(self.project)
def setup_test_v3_role(self):
"""Set up a test v3 role."""
self.test_role = rand_name('role')
resp, self.v3_role = self.client.create_role(self.test_role)
self.v3_roles.append(self.v3_role)
def teardown_all(self):
for user in self.users:
self.client.delete_user(user['id'])
for tenant in self.tenants:
self.client.delete_tenant(tenant['id'])
for role in self.roles:
self.client.delete_role(role['id'])
for v3_user in self.v3_users:
self.client.delete_user(v3_user['id'])
for v3_project in self.projects:
self.client.delete_project(v3_project['id'])
for v3_role in self.v3_roles:
self.client.delete_role(v3_role['id'])
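# Typical usage, as wired up in setUpClass/tearDownClass above: a test calls
# self.data.setup_test_user() (or one of the other setup_* helpers) to create
# throwaway resources and relies on teardown_all() to delete them afterwards.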
| itskewpie/tempest | tempest/api/identity/base.py | Python | apache-2.0 | 6,216 |
"""Support for NX584 alarm control panels."""
import logging
from nx584 import client
import requests
import voluptuous as vol
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel import PLATFORM_SCHEMA
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "NX584"
DEFAULT_PORT = 5007
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NX584 platform."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
url = f"http://{host}:{port}"
try:
add_entities([NX584Alarm(hass, url, name)])
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to NX584: %s", str(ex))
return
class NX584Alarm(alarm.AlarmControlPanel):
"""Representation of a NX584-based alarm panel."""
def __init__(self, hass, url, name):
"""Init the nx584 alarm panel."""
self._hass = hass
self._name = name
self._url = url
self._alarm = client.Client(self._url)
        # Do an initial list operation so that we actually talk to the API here
        # and trigger a requests exception for setup_platform() to catch.
self._alarm.list_zones()
self._state = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def code_format(self):
"""Return one or more digits/characters."""
return alarm.FORMAT_NUMBER
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def update(self):
"""Process new events from panel."""
try:
part = self._alarm.list_partitions()[0]
zones = self._alarm.list_zones()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error(
"Unable to connect to %(host)s: %(reason)s",
dict(host=self._url, reason=ex),
)
self._state = None
            return
except IndexError:
_LOGGER.error("NX584 reports no partitions")
self._state = None
            return
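        # Derive the panel state from the partition and zone data: disarmed if
        # the partition is not armed, ARMED_HOME if any zone is bypassed,
        # otherwise ARMED_AWAY; a "Siren on" condition flag marks it TRIGGERED.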
bypassed = False
for zone in zones:
if zone["bypassed"]:
_LOGGER.debug(
"Zone %(zone)s is bypassed, assuming HOME",
dict(zone=zone["number"]),
)
bypassed = True
break
if not part["armed"]:
self._state = STATE_ALARM_DISARMED
elif bypassed:
self._state = STATE_ALARM_ARMED_HOME
else:
self._state = STATE_ALARM_ARMED_AWAY
for flag in part["condition_flags"]:
if flag == "Siren on":
self._state = STATE_ALARM_TRIGGERED
def alarm_disarm(self, code=None):
"""Send disarm command."""
self._alarm.disarm(code)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self._alarm.arm("stay")
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self._alarm.arm("exit")
| leppa/home-assistant | homeassistant/components/nx584/alarm_control_panel.py | Python | apache-2.0 | 4,039 |
#!/usr/bin/python
import sys
def tokens(nodes):
for i in range(0, nodes):
print (i * (2 ** 127 - 1) / nodes)
tokens(int(sys.argv[1]))
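# The loop above spaces tokens evenly over the 0..2**127-1 ring (the range used
# by Cassandra's RandomPartitioner). Example, assuming Python 2 integer
# division: "python get_initial_tokens.py 2" prints 0 and
# 85070591730234615865843651857942052863 (i.e. 2**126 - 1).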
| aglne/Solandra | scripts/get_initial_tokens.py | Python | apache-2.0 | 148 |
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tests for swift.common.compressing_file_reader """
import unittest
import cStringIO
from slogging.compressing_file_reader import CompressingFileReader
class TestCompressingFileReader(unittest.TestCase):
def test_read(self):
plain = 'obj\ndata'
s = cStringIO.StringIO(plain)
expected = '\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xcaO\xca\xe2JI,'\
'I\x04\x00\x00\x00\xff\xff\x03\x00P(\xa8\x1f\x08\x00\x00'\
'\x00'
x = CompressingFileReader(s)
compressed = ''.join(iter(lambda: x.read(), ''))
self.assertEquals(compressed, expected)
self.assertEquals(x.read(), '')
| rackerlabs/sloggingo | test_slogging/unit/test_compressing_file_reader.py | Python | apache-2.0 | 1,258 |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Indicators import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Data.Custom import *
from QuantConnect.Algorithm import *
from QuantConnect.Python import PythonQuandl
### <summary>
### The algorithm creates a new indicator value from existing indicators using Indicator Extensions.
### It demonstrates requesting daily VIX and VXV data from the external custom data source Quandl.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="custom data" />
### <meta name="tag" content="indicators" />
### <meta name="tag" content="indicator classes" />
### <meta name="tag" content="plotting indicators" />
### <meta name="tag" content="charting" />
class CustomDataIndicatorExtensionsAlgorithm(QCAlgorithm):
# Initialize the data and resolution you require for your strategy
def Initialize(self):
self.SetStartDate(2014,1,1)
self.SetEndDate(2018,1,1)
self.SetCash(25000)
self.vix = 'CBOE/VIX'
self.vxv = 'CBOE/VXV'
# Define the symbol and "type" of our generic data
self.AddData(QuandlVix, self.vix, Resolution.Daily)
self.AddData(Quandl, self.vxv, Resolution.Daily)
        # Set up default indicators; these are just 'identities' of the closing price
self.vix_sma = self.SMA(self.vix, 1, Resolution.Daily)
self.vxv_sma = self.SMA(self.vxv, 1, Resolution.Daily)
# This will create a new indicator whose value is smaVXV / smaVIX
self.ratio = IndicatorExtensions.Over(self.vxv_sma, self.vix_sma)
# Plot indicators each time they update using the PlotIndicator function
self.PlotIndicator("Ratio", self.ratio)
self.PlotIndicator("Data", self.vix_sma, self.vxv_sma)
# OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
def OnData(self, data):
# Wait for all indicators to fully initialize
if not (self.vix_sma.IsReady and self.vxv_sma.IsReady and self.ratio.IsReady): return
if not self.Portfolio.Invested and self.ratio.Current.Value > 1:
self.MarketOrder(self.vix, 100)
elif self.ratio.Current.Value < 1:
self.Liquidate()
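    # In plain terms: buy 100 units of the VIX series when not already invested
    # and the smoothed VXV/VIX ratio is above 1; liquidate once the ratio drops
    # below 1 (a ratio of exactly 1 leaves any open position unchanged).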
# In CBOE/VIX data there is a "vix close" column instead of "close", which is the
# default column name in LEAN's Quandl custom data implementation.
# This class assigns a new column name to match the external data source.
class QuandlVix(PythonQuandl):
def __init__(self):
        self.ValueColumnName = "VIX Close"
| AnshulYADAV007/Lean | Algorithm.Python/CustomDataIndicatorExtensionsAlgorithm.py | Python | apache-2.0 | 3632 |
#!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""AFF4 object representing client stats."""
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib.aff4_objects import standard
class ClientStats(standard.VFSDirectory):
"""A container for all client statistics."""
class SchemaCls(standard.VFSDirectory.SchemaCls):
STATS = aff4.Attribute("aff4:stats", rdfvalue.ClientStats,
"Client Stats.", "Client stats")
| MiniSEC/GRR_clone | lib/aff4_objects/client_stats.py | Python | apache-2.0 | 482 |
"""
WSGI config for the evewspace project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os, sys
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evewspace.settings")
application = get_wsgi_application()
| evewspace/eve-wspace | evewspace/wsgi.py | Python | apache-2.0 | 397 |
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import CallResult
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
Application component that provides procedures which
return complex results.
"""
def onConnect(self):
self.join("realm1")
def onJoin(self, details):
def add_complex(a, ai, b, bi):
return CallResult(c = a + b, ci = ai + bi)
self.register(add_complex, 'com.myapp.add_complex')
def split_name(fullname):
forename, surname = fullname.split()
return CallResult(forename, surname)
self.register(split_name, 'com.myapp.split_name')
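# Sketch of a hypothetical caller (not part of this example): a frontend session
# could consume the keyword results roughly like this, assuming the usual
# autobahn.wamp.types.CallResult attributes:
#
#   res = yield self.call('com.myapp.add_complex', 2, 3, 4, 5)
#   print("{} + {}i".format(res.kwresults['c'], res.kwresults['ci']))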
| robtandy/AutobahnPython | examples/twisted/wamp/basic/rpc/complex/backend.py | Python | apache-2.0 | 1,506 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import sys
import uuid
import eventlet
import mock
from oslo_config import cfg
import oslo_messaging
import testtools
from neutron.agent.common import config
from neutron.agent.dhcp import agent as dhcp_agent
from neutron.agent.dhcp import config as dhcp_config
from neutron.agent import dhcp_agent as entry
from neutron.agent.linux import dhcp
from neutron.agent.linux import interface
from neutron.common import config as common_config
from neutron.common import constants as const
from neutron.common import exceptions
from neutron.common import utils
from neutron import context
from neutron.tests import base
HOSTNAME = 'hostname'
dev_man = dhcp.DeviceManager
rpc_api = dhcp_agent.DhcpPluginApi
DEVICE_MANAGER = '%s.%s' % (dev_man.__module__, dev_man.__name__)
DHCP_PLUGIN = '%s.%s' % (rpc_api.__module__, rpc_api.__name__)
fake_tenant_id = 'aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'
fake_subnet1_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.9.2',
end='172.9.9.254'))
fake_subnet1 = dhcp.DictModel(dict(id='bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='172.9.9.0/24', enable_dhcp=True, name='',
tenant_id=fake_tenant_id,
gateway_ip='172.9.9.1', host_routes=[],
dns_nameservers=[], ip_version=4,
ipv6_ra_mode=None, ipv6_address_mode=None,
allocation_pools=fake_subnet1_allocation_pools))
fake_subnet2_allocation_pools = dhcp.DictModel(dict(id='', start='172.9.8.2',
end='172.9.8.254'))
fake_subnet2 = dhcp.DictModel(dict(id='dddddddd-dddd-dddd-dddddddddddd',
network_id='12345678-1234-5678-1234567890ab',
cidr='172.9.8.0/24', enable_dhcp=False, name='',
tenant_id=fake_tenant_id, gateway_ip='172.9.8.1',
host_routes=[], dns_nameservers=[], ip_version=4,
allocation_pools=fake_subnet2_allocation_pools))
fake_subnet3 = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='192.168.1.1/24', enable_dhcp=True))
fake_ipv6_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='2001:0db8::0/64', enable_dhcp=True,
tenant_id=fake_tenant_id,
gateway_ip='2001:0db8::1', ip_version=6,
ipv6_ra_mode='slaac', ipv6_address_mode=None))
fake_meta_subnet = dhcp.DictModel(dict(id='bbbbbbbb-1111-2222-bbbbbbbbbbbb',
network_id='12345678-1234-5678-1234567890ab',
cidr='169.254.169.252/30',
gateway_ip='169.254.169.253',
enable_dhcp=True))
fake_fixed_ip1 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id,
ip_address='172.9.9.9'))
fake_fixed_ip2 = dhcp.DictModel(dict(id='', subnet_id=fake_subnet1.id,
ip_address='172.9.9.10'))
fake_fixed_ipv6 = dhcp.DictModel(dict(id='', subnet_id=fake_ipv6_subnet.id,
ip_address='2001:db8::a8bb:ccff:fedd:ee99'))
fake_meta_fixed_ip = dhcp.DictModel(dict(id='', subnet=fake_meta_subnet,
ip_address='169.254.169.254'))
fake_allocation_pool_subnet1 = dhcp.DictModel(dict(id='', start='172.9.9.2',
end='172.9.9.254'))
fake_port1 = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
device_id='dhcp-12345678-1234-aaaa-1234567890ab',
device_owner='',
allocation_pools=fake_subnet1_allocation_pools,
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ip1]))
fake_dhcp_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789022',
device_id='dhcp-12345678-1234-aaaa-123456789022',
device_owner='network:dhcp',
allocation_pools=fake_subnet1_allocation_pools,
mac_address='aa:bb:cc:dd:ee:22',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ip2]))
fake_port2 = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000',
device_id='dhcp-12345678-1234-aaaa-123456789000',
device_owner='',
mac_address='aa:bb:cc:dd:ee:99',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ip2]))
fake_ipv6_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-123456789000',
device_owner='',
mac_address='aa:bb:cc:dd:ee:99',
network_id='12345678-1234-5678-1234567890ab',
fixed_ips=[fake_fixed_ipv6]))
fake_meta_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
device_owner=const.DEVICE_OWNER_ROUTER_INTF,
device_id='forzanapoli',
fixed_ips=[fake_meta_fixed_ip]))
fake_meta_dvr_port = dhcp.DictModel(fake_meta_port.copy())
fake_meta_dvr_port.device_owner = const.DEVICE_OWNER_DVR_INTERFACE
fake_dist_port = dhcp.DictModel(dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff',
network_id='12345678-1234-5678-1234567890ab',
device_owner=const.DEVICE_OWNER_DVR_INTERFACE,
device_id='forzanapoli',
fixed_ips=[fake_meta_fixed_ip]))
FAKE_NETWORK_UUID = '12345678-1234-5678-1234567890ab'
FAKE_NETWORK_DHCP_NS = "qdhcp-%s" % FAKE_NETWORK_UUID
fake_network = dhcp.NetModel(True, dict(id=FAKE_NETWORK_UUID,
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet2],
ports=[fake_port1]))
fake_network_ipv6 = dhcp.NetModel(True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_ipv6_subnet],
ports=[fake_ipv6_port]))
fake_network_ipv6_ipv4 = dhcp.NetModel(True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_ipv6_subnet, fake_subnet1],
ports=[fake_port1]))
isolated_network = dhcp.NetModel(
True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1],
ports=[fake_port1]))
nonisolated_dist_network = dhcp.NetModel(
True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1],
ports=[fake_port1, fake_port2]))
empty_network = dhcp.NetModel(
True, dict(
id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_subnet1],
ports=[]))
fake_meta_network = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_meta_subnet],
ports=[fake_meta_port]))
fake_meta_dvr_network = dhcp.NetModel(True, fake_meta_network.copy())
fake_meta_dvr_network.ports = [fake_meta_dvr_port]
fake_dist_network = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[fake_meta_subnet],
ports=[fake_meta_port, fake_dist_port]))
fake_down_network = dhcp.NetModel(
True, dict(id='12345678-dddd-dddd-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=False,
subnets=[],
ports=[]))
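# The DictModel/NetModel fixtures above stand in for the payloads the DHCP agent
# would normally receive from the plugin over RPC; the test classes below feed
# them through mocked drivers, caches and plugin proxies.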
class TestDhcpAgent(base.BaseTestCase):
def setUp(self):
super(TestDhcpAgent, self).setUp()
entry.register_options(cfg.CONF)
cfg.CONF.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
# disable setting up periodic state reporting
cfg.CONF.set_override('report_interval', 0, 'AGENT')
self.driver_cls_p = mock.patch(
'neutron.agent.dhcp.agent.importutils.import_class')
self.driver = mock.Mock(name='driver')
self.driver.existing_dhcp_networks.return_value = []
self.driver_cls = self.driver_cls_p.start()
self.driver_cls.return_value = self.driver
self.mock_makedirs_p = mock.patch("os.makedirs")
self.mock_makedirs = self.mock_makedirs_p.start()
def test_init_host(self):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
with mock.patch.object(dhcp, 'sync_state') as sync_state:
dhcp.init_host()
sync_state.assert_called_once_with()
def test_dhcp_agent_manager(self):
state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI'
# sync_state is needed for this test
cfg.CONF.set_override('report_interval', 1, 'AGENT')
with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport,
'sync_state',
autospec=True) as mock_sync_state:
with mock.patch.object(dhcp_agent.DhcpAgentWithStateReport,
'periodic_resync',
autospec=True) as mock_periodic_resync:
with mock.patch(state_rpc_str) as state_rpc:
with mock.patch.object(sys, 'argv') as sys_argv:
sys_argv.return_value = [
'dhcp', '--config-file',
base.etcdir('neutron.conf')]
cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_agent_state_opts_helper(cfg.CONF)
cfg.CONF.register_opts(interface.OPTS)
common_config.init(sys.argv[1:])
agent_mgr = dhcp_agent.DhcpAgentWithStateReport(
'testhost')
eventlet.greenthread.sleep(1)
agent_mgr.after_start()
mock_sync_state.assert_called_once_with(agent_mgr)
mock_periodic_resync.assert_called_once_with(agent_mgr)
state_rpc.assert_has_calls(
[mock.call(mock.ANY),
mock.call().report_state(mock.ANY, mock.ANY,
mock.ANY)])
def test_dhcp_agent_main_agent_manager(self):
logging_str = 'neutron.agent.common.config.setup_logging'
launcher_str = 'oslo_service.service.ServiceLauncher'
with mock.patch(logging_str):
with mock.patch.object(sys, 'argv') as sys_argv:
with mock.patch(launcher_str) as launcher:
sys_argv.return_value = ['dhcp', '--config-file',
base.etcdir('neutron.conf')]
entry.main()
launcher.assert_has_calls(
[mock.call(cfg.CONF),
mock.call().launch_service(mock.ANY),
mock.call().wait()])
def test_run_completes_single_pass(self):
with mock.patch(DEVICE_MANAGER):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['sync_state', 'periodic_resync']])
with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
dhcp.run()
mocks['sync_state'].assert_called_once_with()
mocks['periodic_resync'].assert_called_once_with()
def test_call_driver(self):
network = mock.Mock()
network.id = '1'
dhcp = dhcp_agent.DhcpAgent(cfg.CONF)
self.assertTrue(dhcp.call_driver('foo', network))
self.driver.assert_called_once_with(cfg.CONF,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY)
def _test_call_driver_failure(self, exc=None,
trace_level='exception', expected_sync=True):
network = mock.Mock()
network.id = '1'
self.driver.return_value.foo.side_effect = exc or Exception
with mock.patch.object(dhcp_agent.LOG, trace_level) as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
with mock.patch.object(dhcp,
'schedule_resync') as schedule_resync:
self.assertIsNone(dhcp.call_driver('foo', network))
self.driver.assert_called_once_with(cfg.CONF,
mock.ANY,
mock.ANY,
mock.ANY,
mock.ANY)
self.assertEqual(log.call_count, 1)
self.assertEqual(expected_sync, schedule_resync.called)
def test_call_driver_ip_address_generation_failure(self):
error = oslo_messaging.RemoteError(
exc_type='IpAddressGenerationFailure')
self._test_call_driver_failure(exc=error, expected_sync=False)
def test_call_driver_failure(self):
self._test_call_driver_failure()
def test_call_driver_remote_error_net_not_found(self):
self._test_call_driver_failure(
exc=oslo_messaging.RemoteError(exc_type='NetworkNotFound'),
trace_level='warning')
def test_call_driver_network_not_found(self):
self._test_call_driver_failure(
exc=exceptions.NetworkNotFound(net_id='1'),
trace_level='warning')
def test_call_driver_conflict(self):
self._test_call_driver_failure(
exc=exceptions.Conflict(),
trace_level='warning',
expected_sync=False)
def _test_sync_state_helper(self, known_net_ids, active_net_ids):
active_networks = set(mock.Mock(id=netid) for netid in active_net_ids)
with mock.patch(DHCP_PLUGIN) as plug:
mock_plugin = mock.Mock()
mock_plugin.get_active_networks_info.return_value = active_networks
plug.return_value = mock_plugin
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
attrs_to_mock = dict([(a, mock.DEFAULT)
for a in ['disable_dhcp_helper', 'cache',
'safe_configure_dhcp_for_network']])
with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks:
mocks['cache'].get_network_ids.return_value = known_net_ids
dhcp.sync_state()
diff = set(known_net_ids) - set(active_net_ids)
exp_disable = [mock.call(net_id) for net_id in diff]
mocks['cache'].assert_has_calls([mock.call.get_network_ids()])
mocks['disable_dhcp_helper'].assert_has_calls(exp_disable)
def test_sync_state_initial(self):
self._test_sync_state_helper([], ['a'])
def test_sync_state_same(self):
self._test_sync_state_helper(['a'], ['a'])
def test_sync_state_disabled_net(self):
self._test_sync_state_helper(['b'], ['a'])
def test_sync_state_waitall(self):
with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w:
active_net_ids = ['1', '2', '3', '4', '5']
known_net_ids = ['1', '2', '3', '4', '5']
self._test_sync_state_helper(known_net_ids, active_net_ids)
w.assert_called_once_with()
def test_sync_state_plugin_error(self):
with mock.patch(DHCP_PLUGIN) as plug:
mock_plugin = mock.Mock()
mock_plugin.get_active_networks_info.side_effect = Exception
plug.return_value = mock_plugin
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
with mock.patch.object(dhcp,
'schedule_resync') as schedule_resync:
dhcp.sync_state()
self.assertTrue(log.called)
self.assertTrue(schedule_resync.called)
def test_periodic_resync(self):
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
with mock.patch.object(dhcp_agent.eventlet, 'spawn') as spawn:
dhcp.periodic_resync()
spawn.assert_called_once_with(dhcp._periodic_resync_helper)
def test_periodic_resync_helper(self):
with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
resync_reasons = collections.OrderedDict(
(('a', 'reason1'), ('b', 'reason2')))
dhcp.needs_resync_reasons = resync_reasons
with mock.patch.object(dhcp, 'sync_state') as sync_state:
sync_state.side_effect = RuntimeError
with testtools.ExpectedException(RuntimeError):
dhcp._periodic_resync_helper()
sync_state.assert_called_once_with(resync_reasons.keys())
sleep.assert_called_once_with(dhcp.conf.resync_interval)
self.assertEqual(len(dhcp.needs_resync_reasons), 0)
def test_populate_cache_on_start_without_active_networks_support(self):
        # emulate a dhcp driver that doesn't support retrieving active networks
self.driver.existing_dhcp_networks.side_effect = NotImplementedError
with mock.patch.object(dhcp_agent.LOG, 'debug') as log:
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.driver.existing_dhcp_networks.assert_called_once_with(
dhcp.conf,
)
self.assertFalse(dhcp.cache.get_network_ids())
self.assertTrue(log.called)
def test_populate_cache_on_start(self):
networks = ['aaa', 'bbb']
self.driver.existing_dhcp_networks.return_value = networks
dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.driver.existing_dhcp_networks.assert_called_once_with(
dhcp.conf,
)
self.assertEqual(set(networks), set(dhcp.cache.get_network_ids()))
def test_none_interface_driver(self):
cfg.CONF.set_override('interface_driver', None)
self.assertRaises(SystemExit, dhcp.DeviceManager,
cfg.CONF, None)
def test_nonexistent_interface_driver(self):
        # Temporarily turn off the mock so we can use the real import_class
        # to import interface_driver.
self.driver_cls_p.stop()
self.addCleanup(self.driver_cls_p.start)
cfg.CONF.set_override('interface_driver', 'foo.bar')
self.assertRaises(SystemExit, dhcp.DeviceManager,
cfg.CONF, None)
class TestLogArgs(base.BaseTestCase):
def test_log_args_without_log_dir_and_file(self):
conf_dict = {'debug': True,
'verbose': False,
'log_dir': None,
'log_file': None,
'use_syslog': True,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--use-syslog',
'--syslog-log-facility=LOG_USER']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_without_log_file(self):
conf_dict = {'debug': True,
'verbose': True,
'log_dir': '/etc/tests',
'log_file': None,
'use_syslog': False,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--verbose',
'--log-file=log_file_name',
'--log-dir=/etc/tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_with_log_dir_and_file(self):
conf_dict = {'debug': True,
'verbose': False,
'log_dir': '/etc/tests',
'log_file': 'tests/filelog',
'use_syslog': False,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--log-file=log_file_name',
'--log-dir=/etc/tests/tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_without_log_dir(self):
conf_dict = {'debug': True,
'verbose': False,
'log_file': 'tests/filelog',
'log_dir': None,
'use_syslog': False,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--log-file=log_file_name',
'--log-dir=tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
def test_log_args_with_filelog_and_syslog(self):
conf_dict = {'debug': True,
'verbose': True,
'log_file': 'tests/filelog',
'log_dir': '/etc/tests',
'use_syslog': True,
'syslog_log_facility': 'LOG_USER'}
conf = dhcp.DictModel(conf_dict)
expected_args = ['--debug',
'--verbose',
'--log-file=log_file_name',
'--log-dir=/etc/tests/tests']
args = config.get_log_args(conf, 'log_file_name')
self.assertEqual(expected_args, args)
class TestDhcpAgentEventHandler(base.BaseTestCase):
def setUp(self):
super(TestDhcpAgentEventHandler, self).setUp()
config.register_interface_driver_opts_helper(cfg.CONF)
cfg.CONF.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
entry.register_options(cfg.CONF) # register all dhcp cfg options
self.plugin_p = mock.patch(DHCP_PLUGIN)
plugin_cls = self.plugin_p.start()
self.plugin = mock.Mock()
plugin_cls.return_value = self.plugin
self.cache_p = mock.patch('neutron.agent.dhcp.agent.NetworkCache')
cache_cls = self.cache_p.start()
self.cache = mock.Mock()
cache_cls.return_value = self.cache
self.mock_makedirs_p = mock.patch("os.makedirs")
self.mock_makedirs = self.mock_makedirs_p.start()
self.mock_init_p = mock.patch('neutron.agent.dhcp.agent.'
'DhcpAgent._populate_networks_cache')
self.mock_init = self.mock_init_p.start()
self.dhcp = dhcp_agent.DhcpAgent(HOSTNAME)
self.call_driver_p = mock.patch.object(self.dhcp, 'call_driver')
self.call_driver = self.call_driver_p.start()
self.schedule_resync_p = mock.patch.object(self.dhcp,
'schedule_resync')
self.schedule_resync = self.schedule_resync_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager'
)
self.external_process = self.external_process_p.start()
def _process_manager_constructor_call(self, ns=FAKE_NETWORK_DHCP_NS):
return mock.call(conf=cfg.CONF,
uuid=FAKE_NETWORK_UUID,
namespace=ns,
default_cmd_callback=mock.ANY)
def _enable_dhcp_helper(self, network, enable_isolated_metadata=False,
is_isolated_network=False):
self.dhcp._process_monitor = mock.Mock()
if enable_isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.plugin.get_network_info.return_value = network
self.dhcp.enable_dhcp_helper(network.id)
self.plugin.assert_has_calls([
mock.call.get_network_info(network.id)])
self.call_driver.assert_called_once_with('enable', network)
self.cache.assert_has_calls([mock.call.put(network)])
if is_isolated_network:
self.external_process.assert_has_calls([
self._process_manager_constructor_call(),
mock.call().enable()
])
else:
self.assertFalse(self.external_process.call_count)
def test_enable_dhcp_helper_enable_metadata_isolated_network(self):
self._enable_dhcp_helper(isolated_network,
enable_isolated_metadata=True,
is_isolated_network=True)
def test_enable_dhcp_helper_enable_metadata_no_gateway(self):
isolated_network_no_gateway = copy.deepcopy(isolated_network)
isolated_network_no_gateway.subnets[0].gateway_ip = None
self._enable_dhcp_helper(isolated_network_no_gateway,
enable_isolated_metadata=True,
is_isolated_network=True)
def test_enable_dhcp_helper_enable_metadata_nonisolated_network(self):
nonisolated_network = copy.deepcopy(isolated_network)
nonisolated_network.ports[0].device_owner = (
const.DEVICE_OWNER_ROUTER_INTF)
nonisolated_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1'
self._enable_dhcp_helper(nonisolated_network,
enable_isolated_metadata=True,
is_isolated_network=False)
def test_enable_dhcp_helper_enable_metadata_nonisolated_dist_network(self):
nonisolated_dist_network.ports[0].device_owner = (
const.DEVICE_OWNER_ROUTER_INTF)
nonisolated_dist_network.ports[0].fixed_ips[0].ip_address = '172.9.9.1'
nonisolated_dist_network.ports[1].device_owner = (
const.DEVICE_OWNER_DVR_INTERFACE)
nonisolated_dist_network.ports[1].fixed_ips[0].ip_address = '172.9.9.1'
self._enable_dhcp_helper(nonisolated_dist_network,
enable_isolated_metadata=True,
is_isolated_network=False)
def test_enable_dhcp_helper_enable_metadata_empty_network(self):
self._enable_dhcp_helper(empty_network,
enable_isolated_metadata=True,
is_isolated_network=True)
def test_enable_dhcp_helper_enable_metadata_ipv6_ipv4_network(self):
self._enable_dhcp_helper(fake_network_ipv6_ipv4,
enable_isolated_metadata=True,
is_isolated_network=True)
def test_enable_dhcp_helper_driver_failure_ipv6_ipv4_network(self):
self.plugin.get_network_info.return_value = fake_network_ipv6_ipv4
self.call_driver.return_value = False
cfg.CONF.set_override('enable_isolated_metadata', True)
with mock.patch.object(
self.dhcp, 'enable_isolated_metadata_proxy') as enable_metadata:
self.dhcp.enable_dhcp_helper(fake_network_ipv6_ipv4.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network_ipv6_ipv4.id)])
self.call_driver.assert_called_once_with('enable',
fake_network_ipv6_ipv4)
self.assertFalse(self.cache.called)
self.assertFalse(enable_metadata.called)
self.assertFalse(self.external_process.called)
def test_enable_dhcp_helper(self):
self._enable_dhcp_helper(fake_network)
def test_enable_dhcp_helper_ipv6_network(self):
self._enable_dhcp_helper(fake_network_ipv6)
def test_enable_dhcp_helper_down_network(self):
self.plugin.get_network_info.return_value = fake_down_network
self.dhcp.enable_dhcp_helper(fake_down_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_down_network.id)])
self.assertFalse(self.call_driver.called)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def test_enable_dhcp_helper_network_none(self):
self.plugin.get_network_info.return_value = None
with mock.patch.object(dhcp_agent.LOG, 'warn') as log:
self.dhcp.enable_dhcp_helper('fake_id')
self.plugin.assert_has_calls(
[mock.call.get_network_info('fake_id')])
self.assertFalse(self.call_driver.called)
self.assertTrue(log.called)
self.assertFalse(self.dhcp.schedule_resync.called)
def test_enable_dhcp_helper_exception_during_rpc(self):
self.plugin.get_network_info.side_effect = Exception
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
self.dhcp.enable_dhcp_helper(fake_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network.id)])
self.assertFalse(self.call_driver.called)
self.assertTrue(log.called)
self.assertTrue(self.schedule_resync.called)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def test_enable_dhcp_helper_driver_failure(self):
self.plugin.get_network_info.return_value = fake_network
self.call_driver.return_value = False
self.dhcp.enable_dhcp_helper(fake_network.id)
self.plugin.assert_has_calls(
[mock.call.get_network_info(fake_network.id)])
self.call_driver.assert_called_once_with('enable', fake_network)
self.assertFalse(self.cache.called)
self.assertFalse(self.external_process.called)
def _disable_dhcp_helper_known_network(self, isolated_metadata=False):
if isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.cache.get_network_by_id.return_value = fake_network
self.dhcp.disable_dhcp_helper(fake_network.id)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
self.call_driver.assert_called_once_with('disable', fake_network)
if isolated_metadata:
self.external_process.assert_has_calls([
self._process_manager_constructor_call(ns=None),
mock.call().disable()])
else:
self.assertFalse(self.external_process.call_count)
def test_disable_dhcp_helper_known_network_isolated_metadata(self):
self._disable_dhcp_helper_known_network(isolated_metadata=True)
def test_disable_dhcp_helper_known_network(self):
self._disable_dhcp_helper_known_network()
def test_disable_dhcp_helper_unknown_network(self):
self.cache.get_network_by_id.return_value = None
self.dhcp.disable_dhcp_helper('abcdef')
self.cache.assert_has_calls(
[mock.call.get_network_by_id('abcdef')])
self.assertEqual(0, self.call_driver.call_count)
self.assertFalse(self.external_process.called)
def _disable_dhcp_helper_driver_failure(self, isolated_metadata=False):
if isolated_metadata:
cfg.CONF.set_override('enable_isolated_metadata', True)
self.cache.get_network_by_id.return_value = fake_network
self.call_driver.return_value = False
self.dhcp.disable_dhcp_helper(fake_network.id)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
self.call_driver.assert_called_once_with('disable', fake_network)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_network.id)])
if isolated_metadata:
self.external_process.assert_has_calls([
self._process_manager_constructor_call(ns=None),
mock.call().disable()
])
else:
self.assertFalse(self.external_process.call_count)
def test_disable_dhcp_helper_driver_failure_isolated_metadata(self):
self._disable_dhcp_helper_driver_failure(isolated_metadata=True)
def test_disable_dhcp_helper_driver_failure(self):
self._disable_dhcp_helper_driver_failure()
def test_enable_isolated_metadata_proxy(self):
self.dhcp._process_monitor = mock.Mock()
self.dhcp.enable_isolated_metadata_proxy(fake_network)
self.external_process.assert_has_calls([
self._process_manager_constructor_call(),
mock.call().enable()
])
def test_disable_isolated_metadata_proxy(self):
method_path = ('neutron.agent.metadata.driver.MetadataDriver'
'.destroy_monitored_metadata_proxy')
with mock.patch(method_path) as destroy:
self.dhcp.disable_isolated_metadata_proxy(fake_network)
destroy.assert_called_once_with(self.dhcp._process_monitor,
fake_network.id,
cfg.CONF)
def _test_metadata_network(self, network):
cfg.CONF.set_override('enable_metadata_network', True)
cfg.CONF.set_override('debug', True)
cfg.CONF.set_override('verbose', False)
cfg.CONF.set_override('log_file', 'test.log')
method_path = ('neutron.agent.metadata.driver.MetadataDriver'
'.spawn_monitored_metadata_proxy')
with mock.patch(method_path) as spawn:
self.dhcp.enable_isolated_metadata_proxy(network)
spawn.assert_called_once_with(self.dhcp._process_monitor,
network.namespace,
dhcp.METADATA_PORT,
cfg.CONF,
router_id='forzanapoli')
def test_enable_isolated_metadata_proxy_with_metadata_network(self):
self._test_metadata_network(fake_meta_network)
def test_enable_isolated_metadata_proxy_with_metadata_network_dvr(self):
self._test_metadata_network(fake_meta_dvr_network)
def test_enable_isolated_metadata_proxy_with_dist_network(self):
self._test_metadata_network(fake_dist_network)
def test_network_create_end(self):
payload = dict(network=dict(id=fake_network.id))
with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
self.dhcp.network_create_end(None, payload)
enable.assert_called_once_with(fake_network.id)
def test_network_update_end_admin_state_up(self):
payload = dict(network=dict(id=fake_network.id, admin_state_up=True))
with mock.patch.object(self.dhcp, 'enable_dhcp_helper') as enable:
self.dhcp.network_update_end(None, payload)
enable.assert_called_once_with(fake_network.id)
def test_network_update_end_admin_state_down(self):
payload = dict(network=dict(id=fake_network.id, admin_state_up=False))
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.network_update_end(None, payload)
disable.assert_called_once_with(fake_network.id)
def test_network_delete_end(self):
payload = dict(network_id=fake_network.id)
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.network_delete_end(None, payload)
disable.assert_called_once_with(fake_network.id)
def test_refresh_dhcp_helper_no_dhcp_enabled_networks(self):
network = dhcp.NetModel(True, dict(id='net-id',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[],
ports=[]))
self.cache.get_network_by_id.return_value = network
self.plugin.get_network_info.return_value = network
with mock.patch.object(self.dhcp, 'disable_dhcp_helper') as disable:
self.dhcp.refresh_dhcp_helper(network.id)
disable.assert_called_once_with(network.id)
self.assertFalse(self.cache.called)
self.assertFalse(self.call_driver.called)
self.cache.assert_has_calls(
[mock.call.get_network_by_id('net-id')])
def test_refresh_dhcp_helper_exception_during_rpc(self):
network = dhcp.NetModel(True, dict(id='net-id',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
admin_state_up=True,
subnets=[],
ports=[]))
self.cache.get_network_by_id.return_value = network
self.plugin.get_network_info.side_effect = Exception
with mock.patch.object(dhcp_agent.LOG, 'exception') as log:
self.dhcp.refresh_dhcp_helper(network.id)
self.assertFalse(self.call_driver.called)
self.cache.assert_has_calls(
[mock.call.get_network_by_id('net-id')])
self.assertTrue(log.called)
self.assertTrue(self.dhcp.schedule_resync.called)
def test_subnet_update_end(self):
payload = dict(subnet=dict(network_id=fake_network.id))
self.cache.get_network_by_id.return_value = fake_network
self.plugin.get_network_info.return_value = fake_network
self.dhcp.subnet_update_end(None, payload)
self.cache.assert_has_calls([mock.call.put(fake_network)])
self.call_driver.assert_called_once_with('reload_allocations',
fake_network)
def test_subnet_update_end_restart(self):
new_state = dhcp.NetModel(True, dict(id=fake_network.id,
tenant_id=fake_network.tenant_id,
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet3],
ports=[fake_port1]))
payload = dict(subnet=dict(network_id=fake_network.id))
self.cache.get_network_by_id.return_value = fake_network
self.plugin.get_network_info.return_value = new_state
self.dhcp.subnet_update_end(None, payload)
self.cache.assert_has_calls([mock.call.put(new_state)])
self.call_driver.assert_called_once_with('restart',
new_state)
def test_subnet_update_end_delete_payload(self):
prev_state = dhcp.NetModel(True, dict(id=fake_network.id,
tenant_id=fake_network.tenant_id,
admin_state_up=True,
subnets=[fake_subnet1, fake_subnet3],
ports=[fake_port1]))
payload = dict(subnet_id=fake_subnet1.id)
self.cache.get_network_by_subnet_id.return_value = prev_state
self.cache.get_network_by_id.return_value = prev_state
self.plugin.get_network_info.return_value = fake_network
self.dhcp.subnet_delete_end(None, payload)
self.cache.assert_has_calls([
mock.call.get_network_by_subnet_id(
'bbbbbbbb-bbbb-bbbb-bbbbbbbbbbbb'),
mock.call.get_network_by_id('12345678-1234-5678-1234567890ab'),
mock.call.put(fake_network)])
self.call_driver.assert_called_once_with('restart',
fake_network)
def test_port_update_end(self):
payload = dict(port=fake_port2)
self.cache.get_network_by_id.return_value = fake_network
self.cache.get_port_by_id.return_value = fake_port2
self.dhcp.port_update_end(None, payload)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_port2.network_id),
mock.call.put_port(mock.ANY)])
self.call_driver.assert_called_once_with('reload_allocations',
fake_network)
def test_port_update_change_ip_on_port(self):
payload = dict(port=fake_port1)
self.cache.get_network_by_id.return_value = fake_network
updated_fake_port1 = copy.deepcopy(fake_port1)
updated_fake_port1.fixed_ips[0].ip_address = '172.9.9.99'
self.cache.get_port_by_id.return_value = updated_fake_port1
self.dhcp.port_update_end(None, payload)
self.cache.assert_has_calls(
[mock.call.get_network_by_id(fake_port1.network_id),
mock.call.put_port(mock.ANY)])
self.call_driver.assert_has_calls(
[mock.call.call_driver('reload_allocations', fake_network)])
def test_port_update_change_ip_on_dhcp_agents_port(self):
self.cache.get_network_by_id.return_value = fake_network
self.cache.get_port_by_id.return_value = fake_port1
payload = dict(port=copy.deepcopy(fake_port1))
device_id = utils.get_dhcp_agent_device_id(
payload['port']['network_id'], self.dhcp.conf.host)
payload['port']['fixed_ips'][0]['ip_address'] = '172.9.9.99'
payload['port']['device_id'] = device_id
self.dhcp.port_update_end(None, payload)
self.call_driver.assert_has_calls(
[mock.call.call_driver('restart', fake_network)])
def test_port_update_on_dhcp_agents_port_no_ip_change(self):
self.cache.get_network_by_id.return_value = fake_network
self.cache.get_port_by_id.return_value = fake_port1
payload = dict(port=fake_port1)
device_id = utils.get_dhcp_agent_device_id(
payload['port']['network_id'], self.dhcp.conf.host)
payload['port']['device_id'] = device_id
self.dhcp.port_update_end(None, payload)
self.call_driver.assert_has_calls(
[mock.call.call_driver('reload_allocations', fake_network)])
def test_port_delete_end(self):
payload = dict(port_id=fake_port2.id)
self.cache.get_network_by_id.return_value = fake_network
self.cache.get_port_by_id.return_value = fake_port2
self.dhcp.port_delete_end(None, payload)
self.cache.assert_has_calls(
[mock.call.get_port_by_id(fake_port2.id),
mock.call.get_network_by_id(fake_network.id),
mock.call.remove_port(fake_port2)])
self.call_driver.assert_has_calls(
[mock.call.call_driver('reload_allocations', fake_network)])
def test_port_delete_end_unknown_port(self):
payload = dict(port_id='unknown')
self.cache.get_port_by_id.return_value = None
self.dhcp.port_delete_end(None, payload)
self.cache.assert_has_calls([mock.call.get_port_by_id('unknown')])
self.assertEqual(self.call_driver.call_count, 0)
class TestDhcpPluginApiProxy(base.BaseTestCase):
def _test_dhcp_api(self, method, **kwargs):
ctxt = context.get_admin_context()
proxy = dhcp_agent.DhcpPluginApi('foo', ctxt, None, host='foo')
with mock.patch.object(proxy.client, 'call') as rpc_mock,\
mock.patch.object(proxy.client, 'prepare') as prepare_mock:
prepare_mock.return_value = proxy.client
rpc_mock.return_value = kwargs.pop('return_value', [])
prepare_args = {}
if 'version' in kwargs:
prepare_args['version'] = kwargs.pop('version')
retval = getattr(proxy, method)(**kwargs)
self.assertEqual(retval, rpc_mock.return_value)
prepare_mock.assert_called_once_with(**prepare_args)
kwargs['host'] = proxy.host
rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
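    # Each test below exercises a single RPC: it stubs client.prepare()/call(),
    # then checks that the proxy forwarded the expected method name, arguments,
    # host and (where applicable) RPC version.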
def test_get_active_networks_info(self):
self._test_dhcp_api('get_active_networks_info', version='1.1')
def test_get_network_info(self):
self._test_dhcp_api('get_network_info', network_id='fake_id',
return_value=None)
def test_create_dhcp_port(self):
self._test_dhcp_api('create_dhcp_port', port='fake_port',
return_value=None, version='1.1')
def test_update_dhcp_port(self):
self._test_dhcp_api('update_dhcp_port', port_id='fake_id',
port='fake_port', return_value=None, version='1.1')
def test_release_dhcp_port(self):
self._test_dhcp_api('release_dhcp_port', network_id='fake_id',
device_id='fake_id_2')
def test_release_port_fixed_ip(self):
self._test_dhcp_api('release_port_fixed_ip', network_id='fake_id',
device_id='fake_id_2', subnet_id='fake_id_3')
class TestNetworkCache(base.BaseTestCase):
def test_put_network(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.cache,
{fake_network.id: fake_network})
self.assertEqual(nc.subnet_lookup,
{fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id})
self.assertEqual(nc.port_lookup,
{fake_port1.id: fake_network.id})
def test_put_network_existing(self):
prev_network_info = mock.Mock()
nc = dhcp_agent.NetworkCache()
with mock.patch.object(nc, 'remove') as remove:
nc.cache[fake_network.id] = prev_network_info
nc.put(fake_network)
remove.assert_called_once_with(prev_network_info)
self.assertEqual(nc.cache,
{fake_network.id: fake_network})
self.assertEqual(nc.subnet_lookup,
{fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id})
self.assertEqual(nc.port_lookup,
{fake_port1.id: fake_network.id})
def test_remove_network(self):
nc = dhcp_agent.NetworkCache()
nc.cache = {fake_network.id: fake_network}
nc.subnet_lookup = {fake_subnet1.id: fake_network.id,
fake_subnet2.id: fake_network.id}
nc.port_lookup = {fake_port1.id: fake_network.id}
nc.remove(fake_network)
self.assertEqual(len(nc.cache), 0)
self.assertEqual(len(nc.subnet_lookup), 0)
self.assertEqual(len(nc.port_lookup), 0)
def test_get_network_by_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_id(fake_network.id), fake_network)
def test_get_network_ids(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(list(nc.get_network_ids()), [fake_network.id])
def test_get_network_by_subnet_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_subnet_id(fake_subnet1.id),
fake_network)
def test_get_network_by_port_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_network_by_port_id(fake_port1.id),
fake_network)
def test_put_port(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1]))
nc = dhcp_agent.NetworkCache()
nc.put(fake_net)
nc.put_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 2)
self.assertIn(fake_port2, fake_net.ports)
def test_put_port_existing(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1, fake_port2]))
nc = dhcp_agent.NetworkCache()
nc.put(fake_net)
nc.put_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 2)
self.assertIn(fake_port2, fake_net.ports)
def test_remove_port_existing(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa',
subnets=[fake_subnet1],
ports=[fake_port1, fake_port2]))
nc = dhcp_agent.NetworkCache()
nc.put(fake_net)
nc.remove_port(fake_port2)
self.assertEqual(len(nc.port_lookup), 1)
self.assertNotIn(fake_port2, fake_net.ports)
def test_get_port_by_id(self):
nc = dhcp_agent.NetworkCache()
nc.put(fake_network)
self.assertEqual(nc.get_port_by_id(fake_port1.id), fake_port1)
class FakePort1(object):
id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
class FakeV4Subnet(object):
id = 'dddddddd-dddd-dddd-dddd-dddddddddddd'
ip_version = 4
cidr = '192.168.0.0/24'
gateway_ip = '192.168.0.1'
enable_dhcp = True
class FakeV4SubnetNoGateway(object):
id = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
ip_version = 4
cidr = '192.168.1.0/24'
gateway_ip = None
enable_dhcp = True
class FakeV4Network(object):
id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
subnets = [FakeV4Subnet()]
ports = [FakePort1()]
namespace = 'qdhcp-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeV4NetworkNoSubnet(object):
id = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
subnets = []
ports = []
class FakeV4NetworkNoGateway(object):
id = 'cccccccc-cccc-cccc-cccc-cccccccccccc'
subnets = [FakeV4SubnetNoGateway()]
ports = [FakePort1()]
class TestDeviceManager(base.BaseTestCase):
def setUp(self):
super(TestDeviceManager, self).setUp()
config.register_interface_driver_opts_helper(cfg.CONF)
config.register_use_namespaces_opts_helper(cfg.CONF)
cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS)
cfg.CONF.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_isolated_metadata', True)
self.ensure_device_is_ready_p = mock.patch(
'neutron.agent.linux.ip_lib.ensure_device_is_ready')
self.ensure_device_is_ready = (self.ensure_device_is_ready_p.start())
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
self.iproute_cls_p = mock.patch('neutron.agent.linux.'
'ip_lib.IpRouteCommand')
driver_cls = self.dvr_cls_p.start()
iproute_cls = self.iproute_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
self.mock_driver.use_gateway_ips = False
self.mock_iproute = mock.MagicMock()
driver_cls.return_value = self.mock_driver
iproute_cls.return_value = self.mock_iproute
iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
iptables_cls = iptables_cls_p.start()
self.iptables_inst = mock.Mock()
iptables_cls.return_value = self.iptables_inst
self.mangle_inst = mock.Mock()
self.iptables_inst.ipv4 = {'mangle': self.mangle_inst}
def _test_setup_helper(self, device_is_ready, net=None, port=None):
net = net or fake_network
port = port or fake_port1
plugin = mock.Mock()
plugin.create_dhcp_port.return_value = port or fake_port1
self.ensure_device_is_ready.return_value = device_is_ready
self.mock_driver.get_device_name.return_value = 'tap12345678-12'
dh = dhcp.DeviceManager(cfg.CONF, plugin)
dh._set_default_route = mock.Mock()
interface_name = dh.setup(net)
self.assertEqual(interface_name, 'tap12345678-12')
plugin.assert_has_calls([
mock.call.create_dhcp_port(
{'port': {'name': '', 'admin_state_up': True,
'network_id': net.id, 'tenant_id': net.tenant_id,
'fixed_ips':
[{'subnet_id': port.fixed_ips[0].subnet_id}],
'device_id': mock.ANY}})])
if port == fake_ipv6_port:
expected_ips = ['169.254.169.254/16']
else:
expected_ips = ['172.9.9.9/24', '169.254.169.254/16']
expected = [
mock.call.get_device_name(port),
mock.call.init_l3(
'tap12345678-12',
expected_ips,
namespace=net.namespace)]
if not device_is_ready:
expected.insert(1,
mock.call.plug(net.id,
port.id,
'tap12345678-12',
'aa:bb:cc:dd:ee:ff',
namespace=net.namespace))
self.mock_driver.assert_has_calls(expected)
dh._set_default_route.assert_called_once_with(net, 'tap12345678-12')
def test_setup(self):
cfg.CONF.set_override('enable_metadata_network', False)
self._test_setup_helper(False)
cfg.CONF.set_override('enable_metadata_network', True)
self._test_setup_helper(False)
def test_setup_calls_fill_dhcp_udp_checksums(self):
self._test_setup_helper(False)
rule = ('-p udp --dport %d -j CHECKSUM --checksum-fill'
% const.DHCP_RESPONSE_PORT)
expected = [mock.call.add_rule('POSTROUTING', rule)]
self.mangle_inst.assert_has_calls(expected)
def test_setup_create_dhcp_port(self):
plugin = mock.Mock()
net = copy.deepcopy(fake_network)
plugin.create_dhcp_port.return_value = fake_dhcp_port
dh = dhcp.DeviceManager(cfg.CONF, plugin)
dh.setup(net)
plugin.assert_has_calls([
mock.call.create_dhcp_port(
{'port': {'name': '', 'admin_state_up': True,
'network_id': net.id,
'tenant_id': net.tenant_id,
'fixed_ips': [{'subnet_id':
fake_dhcp_port.fixed_ips[0].subnet_id}],
'device_id': mock.ANY}})])
self.assertIn(fake_dhcp_port, net.ports)
def test_setup_ipv6(self):
self._test_setup_helper(True, net=fake_network_ipv6,
port=fake_ipv6_port)
def test_setup_device_is_ready(self):
self._test_setup_helper(True)
def test_create_dhcp_port_raise_conflict(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
plugin.create_dhcp_port.return_value = None
self.assertRaises(exceptions.Conflict,
dh.setup_dhcp_port,
fake_network)
def test_create_dhcp_port_create_new(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
plugin.create_dhcp_port.return_value = fake_network.ports[0]
dh.setup_dhcp_port(fake_network)
plugin.assert_has_calls([
mock.call.create_dhcp_port(
{'port': {'name': '', 'admin_state_up': True,
'network_id':
fake_network.id, 'tenant_id': fake_network.tenant_id,
'fixed_ips':
[{'subnet_id': fake_fixed_ip1.subnet_id}],
'device_id': mock.ANY}})])
def test_create_dhcp_port_update_add_subnet(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
fake_network_copy.subnets[1].enable_dhcp = True
plugin.update_dhcp_port.return_value = fake_network.ports[0]
dh.setup_dhcp_port(fake_network_copy)
port_body = {'port': {
'network_id': fake_network.id,
'fixed_ips': [{'subnet_id': fake_fixed_ip1.subnet_id,
'ip_address': fake_fixed_ip1.ip_address},
{'subnet_id': fake_subnet2.id}]}}
plugin.assert_has_calls([
mock.call.update_dhcp_port(fake_network_copy.ports[0].id,
port_body)])
def test_update_dhcp_port_raises_conflict(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
fake_network_copy.subnets[1].enable_dhcp = True
plugin.update_dhcp_port.return_value = None
self.assertRaises(exceptions.Conflict,
dh.setup_dhcp_port,
fake_network_copy)
def test_create_dhcp_port_no_update_or_create(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
dh.setup_dhcp_port(fake_network_copy)
self.assertFalse(plugin.setup_dhcp_port.called)
self.assertFalse(plugin.update_dhcp_port.called)
def test_setup_dhcp_port_with_non_enable_dhcp_subnet(self):
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
fake_network_copy = copy.deepcopy(fake_network)
fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network)
plugin.update_dhcp_port.return_value = fake_port1
self.assertEqual(fake_subnet1.id,
dh.setup_dhcp_port(fake_network_copy).fixed_ips[0].subnet_id)
def test_destroy(self):
fake_net = dhcp.NetModel(
True, dict(id=FAKE_NETWORK_UUID,
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
mock_driver = mock.MagicMock()
mock_driver.get_device_name.return_value = 'tap12345678-12'
dvr_cls.return_value = mock_driver
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
dh.destroy(fake_net, 'tap12345678-12')
dvr_cls.assert_called_once_with(cfg.CONF)
mock_driver.assert_has_calls(
[mock.call.unplug('tap12345678-12',
namespace='qdhcp-' + fake_net.id)])
plugin.assert_has_calls(
[mock.call.release_dhcp_port(fake_net.id, mock.ANY)])
def test_get_interface_name(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
fake_port = dhcp.DictModel(
dict(id='12345678-1234-aaaa-1234567890ab',
mac_address='aa:bb:cc:dd:ee:ff'))
with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls:
mock_driver = mock.MagicMock()
mock_driver.get_device_name.return_value = 'tap12345678-12'
dvr_cls.return_value = mock_driver
plugin = mock.Mock()
dh = dhcp.DeviceManager(cfg.CONF, plugin)
dh.get_interface_name(fake_net, fake_port)
dvr_cls.assert_called_once_with(cfg.CONF)
mock_driver.assert_has_calls(
[mock.call.get_device_name(fake_port)])
self.assertEqual(len(plugin.mock_calls), 0)
def test_get_device_id(self):
fake_net = dhcp.NetModel(
True, dict(id='12345678-1234-5678-1234567890ab',
tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa'))
expected = ('dhcp1ae5f96c-c527-5079-82ea-371a01645457-12345678-1234-'
'5678-1234567890ab')
with mock.patch('uuid.uuid5') as uuid5:
uuid5.return_value = '1ae5f96c-c527-5079-82ea-371a01645457'
dh = dhcp.DeviceManager(cfg.CONF, None)
uuid5.called_once_with(uuid.NAMESPACE_DNS, cfg.CONF.host)
self.assertEqual(dh.get_device_id(fake_net), expected)
def test_update(self):
# Try with namespaces and no metadata network
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_metadata_network', False)
dh = dhcp.DeviceManager(cfg.CONF, None)
dh._set_default_route = mock.Mock()
network = mock.Mock()
dh.update(network, 'ns-12345678-12')
dh._set_default_route.assert_called_once_with(network,
'ns-12345678-12')
# No namespaces, shouldn't set default route.
cfg.CONF.set_override('use_namespaces', False)
cfg.CONF.set_override('enable_metadata_network', False)
dh = dhcp.DeviceManager(cfg.CONF, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network(), 'tap12345678-12')
self.assertFalse(dh._set_default_route.called)
# Meta data network enabled, don't interfere with its gateway.
cfg.CONF.set_override('use_namespaces', True)
cfg.CONF.set_override('enable_metadata_network', True)
dh = dhcp.DeviceManager(cfg.CONF, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network(), 'ns-12345678-12')
self.assertTrue(dh._set_default_route.called)
# For completeness
cfg.CONF.set_override('use_namespaces', False)
cfg.CONF.set_override('enable_metadata_network', True)
dh = dhcp.DeviceManager(cfg.CONF, None)
dh._set_default_route = mock.Mock()
dh.update(FakeV4Network(), 'ns-12345678-12')
self.assertFalse(dh._set_default_route.called)
def test_set_default_route(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = None
# Basic one subnet with gateway.
network = FakeV4Network()
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
device.route.add_gateway.assert_called_once_with('192.168.0.1')
def test_set_default_route_no_subnet(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = None
network = FakeV4NetworkNoSubnet()
network.namespace = 'qdhcp-1234'
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_no_subnet_delete_gateway(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
network = FakeV4NetworkNoSubnet()
network.namespace = 'qdhcp-1234'
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
device.route.delete_gateway.assert_called_once_with('192.168.0.1')
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_no_gateway(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
network = FakeV4NetworkNoGateway()
network.namespace = 'qdhcp-1234'
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
device.route.delete_gateway.assert_called_once_with('192.168.0.1')
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_do_nothing(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = dict(gateway='192.168.0.1')
network = FakeV4Network()
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
self.assertFalse(device.route.add_gateway.called)
def test_set_default_route_change_gateway(self):
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = dict(gateway='192.168.0.2')
network = FakeV4Network()
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
device.route.add_gateway.assert_called_once_with('192.168.0.1')
def test_set_default_route_two_subnets(self):
# Try two subnets. Should set gateway from the first.
dh = dhcp.DeviceManager(cfg.CONF, None)
with mock.patch.object(dhcp.ip_lib, 'IPDevice') as mock_IPDevice:
device = mock.Mock()
mock_IPDevice.return_value = device
device.route.get_gateway.return_value = None
network = FakeV4Network()
subnet2 = FakeV4Subnet()
subnet2.gateway_ip = '192.168.1.1'
network.subnets = [subnet2, FakeV4Subnet()]
dh._set_default_route(network, 'tap-name')
self.assertEqual(device.route.get_gateway.call_count, 1)
self.assertFalse(device.route.delete_gateway.called)
device.route.add_gateway.assert_called_once_with('192.168.1.1')
class TestDictModel(base.BaseTestCase):
def test_basic_dict(self):
d = dict(a=1, b=2)
m = dhcp.DictModel(d)
self.assertEqual(m.a, 1)
self.assertEqual(m.b, 2)
def test_dict_has_sub_dict(self):
d = dict(a=dict(b=2))
m = dhcp.DictModel(d)
self.assertEqual(m.a.b, 2)
def test_dict_contains_list(self):
d = dict(a=[1, 2])
m = dhcp.DictModel(d)
self.assertEqual(m.a, [1, 2])
def test_dict_contains_list_of_dicts(self):
d = dict(a=[dict(b=2), dict(c=3)])
m = dhcp.DictModel(d)
self.assertEqual(m.a[0].b, 2)
self.assertEqual(m.a[1].c, 3)
class TestNetModel(base.BaseTestCase):
def test_ns_name(self):
network = dhcp.NetModel(True, {'id': 'foo'})
self.assertEqual(network.namespace, 'qdhcp-foo')
def test_ns_name_false_namespace(self):
network = dhcp.NetModel(False, {'id': 'foo'})
self.assertIsNone(network.namespace)
def test_ns_name_none_namespace(self):
network = dhcp.NetModel(None, {'id': 'foo'})
self.assertIsNone(network.namespace)
| vivekanand1101/neutron | neutron/tests/unit/agent/dhcp/test_agent.py | Python | apache-2.0 | 70,601 |
# Class definition:
# NordugridATLASExperiment
# This class is the ATLAS experiment class for Nordugrid inheriting from Experiment
# Instances are generated with ExperimentFactory via pUtil::getExperiment()
# import relevant python/pilot modules
from Experiment import Experiment # Main experiment class
from pUtil import tolog # Logging method that sends text to the pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from pUtil import isAnalysisJob # Is the current job a user analysis job or a production job?
from pUtil import verifyReleaseString # To verify the release string (move to Experiment later)
from pUtil import timedCommand # Standard time-out function
from PilotErrors import PilotErrors # Error codes
from ATLASExperiment import ATLASExperiment
# Standard python modules
import os
import re
import commands
from glob import glob
class NordugridATLASExperiment(ATLASExperiment):
# private data members
__experiment = "Nordugrid-ATLAS"
__instance = None
__warning = ""
__analysisJob = False
__job = None
# Required methods
def __init__(self):
""" Default initialization """
# not needed?
# e.g. self.__errorLabel = errorLabel
pass
def __new__(cls, *args, **kwargs):
""" Override the __new__ method to make the class a singleton """
if not cls.__instance:
cls.__instance = super(ATLASExperiment, cls).__new__(cls, *args, **kwargs)
return cls.__instance
def getExperiment(self):
""" Return a string with the experiment name """
return self.__experiment
def setParameters(self, *args, **kwargs):
""" Set any internally needed variables """
# set initial values
self.__job = kwargs.get('job', None)
if self.__job:
self.__analysisJob = isAnalysisJob(self.__job.trf)
else:
self.__warning = "setParameters found no job object"
def getJobExecutionCommandObsolete(self, job, jobSite, pilot_initdir):
""" Define and test the command(s) that will be used to execute the payload """
# Input tuple: (method is called from RunJob*)
# job: Job object
# jobSite: Site object
# pilot_initdir: launch directory of pilot.py
#
# Return tuple:
# pilot_error_code, pilot_error_diagnostics, job_execution_command, special_setup_command, JEM, cmtconfig
# where
# pilot_error_code : self.__error.<PILOT ERROR CODE as defined in PilotErrors class> (value should be 0 for successful setup)
# pilot_error_diagnostics: any output from problematic command or explanatory error diagnostics
# job_execution_command : command to execute payload, e.g. cmd = "source <path>/setup.sh; <path>/python trf.py [options]"
        # special_setup_command : any special setup command that can be inserted into job_execution_command and is sent to stage-in/out methods
# JEM : Job Execution Monitor activation state (default value "NO", meaning JEM is not to be used. See JEMstub.py)
# cmtconfig : cmtconfig symbol from the job def or schedconfig, e.g. "x86_64-slc5-gcc43-opt" [NOT USED IN THIS CLASS]
pilotErrorDiag = ""
cmd = ""
special_setup_cmd = ""
pysiteroot = ""
siteroot = ""
JEM = "NO"
cmtconfig = ""
        # Is it an analysis job or not?
analysisJob = isAnalysisJob(job.trf)
# Set the INDS env variable (used by runAthena)
if analysisJob:
self.setINDS(job.realDatasetsIn)
# Command used to download runAthena or runGen
wgetCommand = 'wget'
# special setup for NG
status, pilotErrorDiag, cmd = self.setupNordugridTrf(job, analysisJob, wgetCommand, pilot_initdir)
if status != 0:
return status, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# add FRONTIER debugging and RUCIO env variables
cmd = self.addEnvVars2Cmd(cmd, job.jobId, job.taskID, job.processingType, jobSite.sitename, analysisJob)
if readpar('cloud') == "DE":
# Should JEM be used?
metaOut = {}
try:
import sys
from JEMstub import updateRunCommand4JEM
# If JEM should be used, the command will get updated by the JEMstub automatically.
cmd = updateRunCommand4JEM(cmd, job, jobSite, tolog, metaOut=metaOut)
except:
# On failure, cmd stays the same
tolog("Failed to update run command for JEM - will run unmonitored.")
# Is JEM to be used?
if metaOut.has_key("JEMactive"):
JEM = metaOut["JEMactive"]
tolog("Use JEM: %s (dictionary = %s)" % (JEM, str(metaOut)))
elif '--enable-jem' in cmd:
tolog("!!WARNING!!1111!! JEM can currently only be used on certain sites in DE")
# Pipe stdout/err for payload to files
cmd += " 1>%s 2>%s" % (job.stdout, job.stderr)
tolog("\nCommand to run the job is: \n%s" % (cmd))
tolog("ATLAS_PYTHON_PILOT = %s" % (os.environ['ATLAS_PYTHON_PILOT']))
if special_setup_cmd != "":
tolog("Special setup command: %s" % (special_setup_cmd))
return 0, pilotErrorDiag, cmd, special_setup_cmd, JEM, cmtconfig
def willDoFileLookups(self):
""" Should (LFC) file lookups be done by the pilot or not? """
return False
def willDoFileRegistration(self):
""" Should (LFC) file registration be done by the pilot or not? """
return False
# Additional optional methods
def setupNordugridTrf(self, job, analysisJob, wgetCommand, pilot_initdir):
""" perform the Nordugrid trf setup """
error = PilotErrors()
pilotErrorDiag = ""
cmd = ""
# assume that the runtime script has already been created
if not os.environ.has_key('RUNTIME_CONFIG_DIR'):
pilotErrorDiag = "Environment variable not set: RUNTIME_CONFIG_DIR"
tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
return error.ERR_SETUPFAILURE, pilotErrorDiag, ""
runtime_script = "%s/APPS/HEP/ATLAS-%s" % (os.environ['RUNTIME_CONFIG_DIR'], job.release)
if os.path.exists(runtime_script):
cmd = ". %s 1" % (runtime_script)
if analysisJob:
# try to download the analysis trf
status, pilotErrorDiag, trfName = self.getAnalysisTrf(wgetCommand, job.trf, pilot_initdir)
if status != 0:
return status, pilotErrorDiag, ""
trfName = "./" + trfName
else:
trfName = job.trf
cmd += '; export ATLAS_RELEASE=%s;export AtlasVersion=%s;export AtlasPatchVersion=%s' % (job.homePackage.split('/')[-1],job.homePackage.split('/')[-1],job.homePackage.split('/')[-1])
cmd += "; %s %s" % (trfName, job.jobPars)
elif verifyReleaseString(job.release) == "NULL":
if analysisJob:
# try to download the analysis trf
status, pilotErrorDiag, trfName = self.getAnalysisTrf(wgetCommand, job.trf, pilot_initdir)
if status != 0:
return status, pilotErrorDiag, ""
trfName = "./" + trfName
else:
trfName = job.trf
cmd = "%s %s" % (trfName, job.jobPars)
else:
pilotErrorDiag = "Could not locate runtime script: %s" % (runtime_script)
tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
return error.ERR_SETUPFAILURE, pilotErrorDiag, ""
# correct for multi-core if necessary (especially important in case coreCount=1 to limit parallel make)
cmd = self.addMAKEFLAGS(job.coreCount, "") + cmd
return 0, pilotErrorDiag, cmd
def getWarning(self):
""" Return any warning message passed to __warning """
return self.__warning
def getReleaseObsolete(self, release):
""" Return a list of the software release id's """
# Assuming 'release' is a string that separates release id's with '\n'
# Used in the case of payload using multiple steps with different release versions
# E.g. release = "19.0.0\n19.1.0" -> ['19.0.0', '19.1.0']
if os.environ.has_key('Nordugrid_pilot') and os.environ.has_key('ATLAS_RELEASE'):
return os.environ['ATLAS_RELEASE'].split(",")
else:
return release.split("\n")
def checkSpecialEnvVars(self, sitename):
""" Check special environment variables """
# Set a special env variable that will be used to identify Nordugrid in other pilot classes
os.environ['Nordugrid_pilot'] = ""
# Call the method from the parent class
ec = super(NordugridATLASExperiment, self).checkSpecialEnvVars(sitename)
return ec
# Optional
def shouldExecuteUtility(self):
""" Determine where a memory utility monitor should be executed """
# The RunJob class has the possibility to execute a memory utility monitor that can track the memory usage
# of the payload. The monitor is executed if this method returns True. The monitor is expected to produce
# a summary JSON file whose name is defined by the getMemoryMonitorJSONFilename() method. The contents of
# this file (ie. the full JSON dictionary) will be added to the jobMetrics at the end of the job (see
# PandaServerClient class).
return True
# Optional
def getUtilityJSONFilename(self):
""" Return the filename of the memory monitor JSON file """
# For explanation, see shouldExecuteUtility()
return "memory_monitor_summary.json"
def getSetupPath(self, job_command, trf):
""" Get the setup path from the job execution command """
setup = ""
# Trim the trf if necessary (i.e. remove any paths which are present in buildJob jobs)
trf = self.trimTrfName(trf)
# Take care of special cases, e.g. trf="buildJob-.." but job_command="..; ./buildJob-.."
special_case = "./%s" % (trf)
if special_case in job_command:
trf = special_case
# Strip the setup command at the location of the trf name
l = job_command.find(trf)
if l > 0:
setup = job_command[:l]
# Make sure to remove any unwanted white spaces as well
return setup.strip()
def trimTrfName(self, trfName):
""" Remove any unwanted strings from the trfName """
if "/" in trfName:
trfName = os.path.basename(trfName)
return trfName
def updateSetupPathWithReleaseAndCmtconfig(self, setup_path, release, alt_release, patched_release, alt_patched_release, cmtconfig, alt_cmtconfig):
""" Update the setup path with an alternative release, pathched release and cmtconfig """
# This method can be used to modify a setup path with an alternative release, patched release and cmtconfig
# E.g. this can be used by a tool that might want to fall back to a preferred setup
# Correct the release info
if "-" in release: # the cmtconfig is appended, e.g. release='17.2.7-X86_64-SLC5-GCC43-OPT'
cmtconfig = release[release.find('-')+1:]
release = release[:release.find('-')]
# Update the patched release with a tmp string
if patched_release != "" and patched_release in setup_path:
setup_path = setup_path.replace(patched_release, '__PATCHED_RELEASE__')
# Update the release
if release in setup_path:
setup_path = setup_path.replace(release, alt_release)
# Update the patched release
if '__PATCHED_RELEASE__' in setup_path:
setup_path = setup_path.replace('__PATCHED_RELEASE__', alt_patched_release)
# Update the cmtconfig
if cmtconfig != "" and cmtconfig in setup_path:
setup_path = setup_path.replace(cmtconfig, alt_cmtconfig.upper())
return setup_path
# Optional
def getUtilityCommand(self, **argdict):
""" Prepare a utility command string """
# This method can be used to prepare a setup string for an optional utility tool, e.g. a memory monitor,
# that will be executed by the pilot in parallel with the payload.
# The pilot will look for an output JSON file (summary.json) and will extract pre-determined fields
# from it and report them with the job updates. Currently the pilot expects to find fields related
# to memory information.
pid = argdict.get('pid', 0)
summary = self.getUtilityJSONFilename()
workdir = argdict.get('workdir', '.')
interval = 60
default_release = "21.0.22" #"21.0.18" #"21.0.17" #"20.7.5" #"20.1.5"
# default_patch_release = "20.7.5.8" #"20.1.5.2" #"20.1.4.1"
# default_cmtconfig = "x86_64-slc6-gcc49-opt"
default_cmtconfig = "x86_64-slc6-gcc62-opt"
# default_swbase = "%s/atlas.cern.ch/repo/sw/software" % (self.getCVMFSPath())
default_swbase = "%s/atlas.cern.ch/repo" % (self.getCVMFSPath())
default_setup = self.getModernASetup() + " Athena," + default_release + " --platform " + default_cmtconfig
tolog("Will use default (fallback) setup for MemoryMonitor")
cmd = default_setup
# Now add the MemoryMonitor command
cmd += "; MemoryMonitor --pid %d --filename %s --json-summary %s --interval %d" % (pid, self.getUtilityOutputFilename(), summary, interval)
cmd = "cd " + workdir + ";" + cmd
return cmd
if __name__ == "__main__":
print "Implement test cases here"
| PanDAWMS/pilot | NordugridATLASExperiment.py | Python | apache-2.0 | 13,955 |
#! /usr/bin/python
'''
Saves relevant data fed back from TwitterStream etc next to its PID and timestamp ready for analysis
Needs to do limited analysis to work out which keywords in the tweet stream correspond to which programme
'''
from datetime import datetime
import os
import string
import time as time2
from time import time
from Axon.Ipc import producerFinished
from Axon.Ipc import shutdownMicroprocess
from Axon.ThreadedComponent import threadedcomponent
import MySQLdb
import _mysql_exceptions
import cjson
from dateutil.parser import parse
class DataCollector(threadedcomponent):
Inboxes = {
"inbox" : "Receives data in the format [tweetjson,[pid,pid]]",
"control" : ""
}
Outboxes = {
"outbox" : "",
"signal" : ""
}
def __init__(self,dbuser,dbpass):
super(DataCollector, self).__init__()
self.dbuser = dbuser
self.dbpass = dbpass
def finished(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
def dbConnect(self):
db = MySQLdb.connect(user=self.dbuser,passwd=self.dbpass,db="twitter_bookmarks",use_unicode=True,charset="utf8")
cursor = db.cursor()
return cursor
def main(self):
cursor = self.dbConnect()
while not self.finished():
twitdata = list()
# Collect all current received tweet JSON and their related PIDs into a twitdata list
while self.dataReady("inbox"):
pids = list()
data = self.recv("inbox")
for pid in data[1]:
pids.append(pid)
twitdata.append([data[0],pids])
if len(twitdata) > 0:
# Process the received twitdata
for tweet in twitdata:
tweet[0] = tweet[0].replace("\\/","/") # Fix slashes in links: This may need moving further down the line - ideally it would be handled by cjson
if tweet[0] != "\r\n": # If \r\n is received, this is just a keep alive signal from Twitter every 30 secs
# At this point, each 'tweet' contains tweetdata, and a list of possible pids
newdata = cjson.decode(tweet[0])
if newdata.has_key('delete') or newdata.has_key('scrub_geo') or newdata.has_key('limit'):
# Keep a record of all requests from Twitter for deletions, location removal etc
# As yet none of these have been received, but this code will store them if they are received to enable debugging
filepath = "contentDebug.txt"
if os.path.exists(filepath):
file = open(filepath, 'r')
filecontents = file.read()
else:
filecontents = ""
file = open(filepath, 'w')
file.write(filecontents + "\n" + str(datetime.utcnow()) + " " + cjson.encode(newdata))
file.close()
else:
# This is a real tweet
tweetid = newdata['id']
print "New tweet! @" + newdata['user']['screen_name'] + ": " + newdata['text']
for pid in tweet[1]:
# Cycle through possible pids, grabbing that pid's keywords from the DB
# Then, check this tweet against the keywords and save to DB where appropriate (there may be more than one location)
cursor.execute("""SELECT keyword,type FROM keywords WHERE pid = %s""",(pid))
data = cursor.fetchall()
for row in data:
# Some keywords are stored with a ^. These must be split, and the tweet checked to see if it has both keywords, but not necessarily next to each other
keywords = row[0].split("^")
if len(keywords) == 2:
if string.lower(keywords[0]) in string.lower(newdata['text']) and string.lower(keywords[1]) in string.lower(newdata['text']):
cursor.execute("""SELECT timestamp,timediff FROM programmes WHERE pid = %s ORDER BY timestamp DESC""",(pid))
progdata = cursor.fetchone()
if progdata != None:
# Ensure the user hasn't already tweeted the same text
# Also ensure they haven't tweeted in the past 10 seconds
timestamp = time2.mktime(parse(newdata['created_at']).timetuple())
cursor.execute("""SELECT * FROM rawdata WHERE (pid = %s AND text = %s AND user = %s) OR (pid = %s AND user = %s AND timestamp >= %s AND timestamp < %s)""",(pid,newdata['text'],newdata['user']['screen_name'],pid,newdata['user']['screen_name'],timestamp-10,timestamp+10))
if cursor.fetchone() == None:
print ("Storing tweet for pid " + pid)
# Work out where this tweet really occurred in the programme using timestamps and DVB bridge data
progposition = timestamp - (progdata[0] - progdata[1])
cursor.execute("""INSERT INTO rawdata (tweet_id,pid,timestamp,text,user,programme_position) VALUES (%s,%s,%s,%s,%s,%s)""", (tweetid,pid,timestamp,newdata['text'],newdata['user']['screen_name'],progposition))
break # Break out of this loop and back to check the same tweet against the next programme
else:
print ("Duplicate tweet from user - ignoring")
if string.lower(row[0]) in string.lower(newdata['text']):
cursor.execute("""SELECT timestamp,timediff FROM programmes WHERE pid = %s ORDER BY timestamp DESC""",(pid))
progdata = cursor.fetchone()
if progdata != None:
# Ensure the user hasn't already tweeted the same text for this programme
# Also ensure they haven't tweeted in the past 10 seconds
timestamp = time2.mktime(parse(newdata['created_at']).timetuple())
cursor.execute("""SELECT * FROM rawdata WHERE (pid = %s AND text = %s AND user = %s) OR (pid = %s AND user = %s AND timestamp >= %s AND timestamp < %s)""",(pid,newdata['text'],newdata['user']['screen_name'],pid,newdata['user']['screen_name'],timestamp-10,timestamp+10))
if cursor.fetchone() == None:
print ("Storing tweet for pid " + pid)
# Work out where this tweet really occurred in the programme using timestamps and DVB bridge data
progposition = timestamp - (progdata[0] - progdata[1])
cursor.execute("""INSERT INTO rawdata (tweet_id,pid,timestamp,text,user,programme_position) VALUES (%s,%s,%s,%s,%s,%s)""", (tweetid,pid,timestamp,newdata['text'],newdata['user']['screen_name'],progposition))
break # Break out of this loop and back to check the same tweet against the next programme
else:
print ("Duplicate tweet from user - ignoring")
else:
print "Blank line received from Twitter - no new data"
print ("Done!") # new line to break up display
else:
time2.sleep(0.1)
'''
The raw data collector differs from the plain data collector in that it stores the raw JSON containers for tweets next to their unique IDs, but with no relation to PIDs
This is run concurrent to the other data collector, so the two won't necessarily run at the same rate and could be out of sync
This possible lack of sync must be handled later
'''
class RawDataCollector(threadedcomponent):
Inboxes = {
"inbox" : "Receives data in the format [tweetjson,[pid,pid]]",
"control" : ""
}
Outboxes = {
"outbox" : "",
"signal" : ""
}
def __init__(self,dbuser,dbpass):
super(RawDataCollector, self).__init__()
self.dbuser = dbuser
self.dbpass = dbpass
def finished(self):
while self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
return True
return False
def dbConnect(self):
db = MySQLdb.connect(user=self.dbuser,passwd=self.dbpass,db="twitter_bookmarks",use_unicode=True,charset="utf8")
cursor = db.cursor()
return cursor
def main(self):
cursor = self.dbConnect()
while not self.finished():
twitdata = list()
# As in the data collector, create a list of all tweets currently received
while self.dataReady("inbox"):
data = self.recv("inbox")
twitdata.append(data[0])
if len(twitdata) > 0:
# Cycle through the tweets, fixing their URLs as before, and storing them if they aren't a status message
for tweet in twitdata:
tweet = tweet.replace("\\/","/") # This may need moving further down the line - ideally it would be handled by cjson
if tweet != "\r\n":
newdata = cjson.decode(tweet)
if newdata.has_key('delete') or newdata.has_key('scrub_geo') or newdata.has_key('limit'):
# It is assumed here that the original data collector has handled the Twitter status message
print "Discarding tweet instruction - captured by other component"
else:
tweetid = newdata['id']
# Capture exactly when this tweet was stored
tweetstamp = time()
tweetsecs = int(tweetstamp)
# Include the fractions of seconds portion of the timestamp in a separate field
tweetfrac = tweetstamp - tweetsecs
# We only have a 16000 VARCHAR field to use in MySQL (through choice) - this should be enough, but if not, the tweet will be written out to file
if len(tweet) < 16000:
try:
cursor.execute("""INSERT INTO rawtweets (tweet_id,tweet_json,tweet_stored_seconds,tweet_stored_fraction) VALUES (%s,%s,%s,%s)""", (tweetid,tweet,tweetsecs,tweetfrac))
except _mysql_exceptions.IntegrityError, e:
# Handle the possibility for Twitter having sent us a duplicate
print "Duplicate tweet ID:", str(e)
else:
print "Discarding tweet - length limit exceeded"
tweetcontents = ""
homedir = os.path.expanduser("~")
if os.path.exists(homedir + "/oversizedtweets.conf"):
try:
file = open(homedir + "/oversizedtweets.conf",'r')
tweetcontents = file.read()
file.close()
except IOError, e:
print ("Failed to load oversized tweet cache - it will be overwritten")
try:
file = open(homedir + "/oversizedtweets.conf",'w')
tweetcontents = tweetcontents + tweet
file.write(tweetcontents)
file.close()
except IOError, e:
print ("Failed to save oversized tweet cache")
else:
time2.sleep(0.1)
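# Illustrative wiring sketch (an added note; assumes a TwitterStream-like component
# that emits [tweetjson, [pid, ...]] messages and valid MySQL credentials). Both
# collectors would typically be fed from the same stream, e.g. via a Kamaelia
# Pipeline/Graphline chassis:
#
#   Pipeline(TwitterStream(...), DataCollector(dbuser, dbpass)).run()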
| sparkslabs/kamaelia_ | Sketches/AB/backup/Bookmarks/DataCollector.py | Python | apache-2.0 | 13,360 |
"""
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a
:class:`~sklearn.gaussian_process.GaussianProcessRegressor` with different
kernels. Mean, standard deviation, and 5 samples are shown for both prior
and posterior distributions.
Here, we only give some illustration. To know more about kernels' formulation,
refer to the :ref:`User Guide <gp_kernels>`.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
# Guillaume Lemaitre <[email protected]>
# License: BSD 3 clause
# %%
# Helper function
# ---------------
#
# Before presenting each individual kernel available for Gaussian processes,
# we will define a helper function that allows us to plot samples drawn from
# the Gaussian process.
#
# This function will take a
# :class:`~sklearn.gaussian_process.GaussianProcessRegressor` model and will
# draw samples from the Gaussian process. If the model was not fit, the samples
# are drawn from the prior distribution while after model fitting, the samples are
# drawn from the posterior distribution.
import matplotlib.pyplot as plt
import numpy as np
def plot_gpr_samples(gpr_model, n_samples, ax):
"""Plot samples drawn from the Gaussian process model.
    If the Gaussian process model is not trained then the samples are drawn
    from the prior distribution. Otherwise, the samples are drawn from
the posterior distribution. Be aware that a sample here corresponds to a
function.
Parameters
----------
gpr_model : `GaussianProcessRegressor`
A :class:`~sklearn.gaussian_process.GaussianProcessRegressor` model.
n_samples : int
The number of samples to draw from the Gaussian process distribution.
ax : matplotlib axis
The matplotlib axis where to plot the samples.
"""
x = np.linspace(0, 5, 100)
X = x.reshape(-1, 1)
y_mean, y_std = gpr_model.predict(X, return_std=True)
y_samples = gpr_model.sample_y(X, n_samples)
for idx, single_prior in enumerate(y_samples.T):
ax.plot(
x,
single_prior,
linestyle="--",
alpha=0.7,
label=f"Sampled function #{idx + 1}",
)
ax.plot(x, y_mean, color="black", label="Mean")
ax.fill_between(
x,
y_mean - y_std,
y_mean + y_std,
alpha=0.1,
color="black",
label=r"$\pm$ 1 std. dev.",
)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_ylim([-3, 3])
# %%
# Dataset and Gaussian process generation
# ---------------------------------------
# We will create a training dataset that we will use in the different sections.
rng = np.random.RandomState(4)
X_train = rng.uniform(0, 5, 10).reshape(-1, 1)
y_train = np.sin((X_train[:, 0] - 2.5) ** 2)
n_samples = 5
# %%
# Kernel cookbook
# ---------------
#
# In this section, we illustrate some samples drawn from the prior and posterior
# distributions of the Gaussian process with different kernels.
#
# Radial Basis Function kernel
# ............................
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0))
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Radial Basis Function kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Rational Quadratic kernel
# ..........................
from sklearn.gaussian_process.kernels import RationalQuadratic
kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1, alpha_bounds=(1e-5, 1e15))
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Rational Quadratic kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Periodic kernel
# ...............
from sklearn.gaussian_process.kernels import ExpSineSquared
kernel = 1.0 * ExpSineSquared(
length_scale=1.0,
periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0),
)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Periodic kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Dot product kernel
# ..................
from sklearn.gaussian_process.kernels import ConstantKernel, DotProduct
kernel = ConstantKernel(0.1, (0.01, 10.0)) * (
DotProduct(sigma_0=1.0, sigma_0_bounds=(0.1, 10.0)) ** 2
)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Dot product kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Matérn kernel
# ..............
from sklearn.gaussian_process.kernels import Matern
kernel = 1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0), nu=1.5)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Mattern kernel", fontsize=18)
plt.tight_layout()
# %%
print(f"Kernel parameters before fit:\n{kernel})")
print(
f"Kernel parameters after fit: \n{gpr.kernel_} \n"
f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
| huzq/scikit-learn | examples/gaussian_process/plot_gpr_prior_posterior.py | Python | bsd-3-clause | 8,547 |
test = {
'name': '',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> type(imdb_by_year) == tables.Table
True
>>> imdb_by_year.column('Title').take(range(3))
array(['The Kid (1921)', 'The Gold Rush (1925)', 'The General (1926)'],
dtype='<U75')
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| jamesfolberth/NGC_STEM_camp_AWS | notebooks/data8_notebooks/lab03/tests/q3_2.py | Python | bsd-3-clause | 530 |
# coding=utf-8
from __future__ import unicode_literals
from random import randint
from .. import Provider as AddressProvider
class Provider(AddressProvider):
address_formats = ['{{street_address}}, {{city}}, {{postcode}}']
building_number_formats = ['#', '##', '###']
city_formats = ['{{city_prefix}} {{first_name}}']
street_address_formats = ['{{street_name}}, {{building_number}}']
street_name_formats = ['{{street_prefix}} {{last_name}}',
'{{last_name}} {{street_suffix}}']
city_prefixes = ['місто', 'село', 'селище', 'хутір']
countries = [
'Австралія', 'Австрія', 'Азербайджан', 'Албанія', 'Алжир', 'Ангола',
'Андорра', 'Антигуа і Барбуда', 'Аргентина', 'Афганістан',
'Багамські Острови', 'Бангладеш', 'Барбадос', 'Бахрейн', 'Беліз',
'Бельгія', 'Бенін', 'Білорусь', 'Болгарія', 'Болівія',
'Боснія і Герцеговина', 'Ботсвана', 'Бразилія', 'Бруней',
'Буркіна-Фасо', 'Бурунді', 'Бутан', 'Вануату', 'Ватикан',
'Велика Британія', 'Венесуела', 'В\'єтнам', 'Вірменія', 'Габон',
'Гаїті', 'Гаяна', 'Гамбія', 'Гана', 'Гватемала', 'Гвінея',
'Гвінея-Бісау', 'Гондурас', 'Гренада', 'Греція', 'Грузія', 'Данія',
'Джибуті', 'Домініка', 'Домініканська Республіка', 'Еквадор',
'Екваторіальна Гвінея', 'Еритрея', 'Естонія', 'Ефіопія', 'Єгипет',
'Ємен', 'Замбія', 'Західна Сахара', 'Зімбабве', 'Ізраїль', 'Індія',
'Індонезія', 'Ірак', 'Іран', 'Ірландія', 'Ісландія', 'Іспанія',
'Італія', 'Йорданія', 'Кабо-Верде', 'Казахстан', 'Камбоджа', 'Камерун',
'Канада', 'Катар', 'Кенія', 'Киргизстан', 'КНР', 'Кіпр', 'Кірибаті',
'Колумбія', 'Коморські Острови', 'Конго', 'ДР Конго', 'Південна Корея',
'Північна Корея', 'Косово', 'Коста-Рика', 'Кот-д\'Івуар', 'Куба',
'Кувейт', 'Лаос', 'Латвія', 'Лесото', 'Литва', 'Ліберія', 'Ліван',
'Лівія', 'Ліхтенштейн', 'Люксембург', 'Маврикій', 'Мавританія',
'Мадагаскар', 'Республіка Македонія', 'Малаві', 'Малайзія', 'Малі',
'Мальдіви', 'Мальта', 'Марокко', 'Маршаллові Острови', 'Мексика',
'Федеративні Штати Мікронезії', 'Мозамбік', 'Молдова', 'Монако',
'Монголія', 'М\'янма', 'Намібія', 'Науру', 'Непал', 'Нігер', 'Нігерія',
'Нідерланди', 'Нікарагуа', 'Німеччина', 'Нова Зеландія', 'Норвегія',
'ОАЕ', 'Оман', 'Пакистан', 'Палау', 'Палестинська держава', 'Панама',
'Папуа Нова Гвінея', 'ПАР', 'Парагвай', 'Перу', 'Південний Судан',
'Польща', 'Португалія', 'Росія', 'Руанда', 'Румунія', 'Сальвадор',
'Самоа', 'Сан-Марино', 'Сан-Томе і Принсіпі', 'Саудівська Аравія',
'Свазіленд', 'Сейшельські Острови', 'Сенегал',
'Сент-Вінсент і Гренадини', 'Сент-Кіттс і Невіс', 'Сент-Люсія',
'Сербія', 'Сінгапур', 'Сирія', 'Словаччина', 'Словенія',
'Соломонові Острови', 'Сомалі', 'Судан', 'Суринам', 'Східний Тимор',
'США', 'Сьєрра-Леоне', 'Таджикистан', 'Таїланд', 'Тайвань', 'Танзанія',
'Того', 'Тонга', 'Тринідад і Тобаго', 'Тувалу', 'Туніс', 'Туреччина',
'Туркменістан', 'Уганда', 'Угорщина', 'Узбекистан', 'Україна',
'Уругвай', 'Фіджі', 'Філіппіни', 'Фінляндія', 'Франція', 'Хорватія',
'Центральноафриканська Республіка', 'Чад', 'Чехія', 'Чилі',
'Чорногорія', 'Швейцарія', 'Швеція', 'Шрі-Ланка', 'Ямайка', 'Японія'
]
street_prefixes = [
'вулиця', 'проспект', 'майдан', 'набережна', 'бульвар', 'провулок'
]
street_suffixes = ['узвіз']
@classmethod
def city_prefix(cls):
return cls.random_element(cls.city_prefixes)
@classmethod
def postcode(cls):
"""The code consists of five digits (01000-99999)"""
        return '{}{}'.format(randint(0, 9), randint(1000, 9999))
@classmethod
def street_prefix(cls):
return cls.random_element(cls.street_prefixes)
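# Usage sketch (an added note; the exact factory call depends on the installed
# Faker version, and the output values below are purely illustrative):
#   from faker import Factory
#   fake = Factory.create('uk_UA')
#   fake.address()  # e.g. "вулиця Мельник, 12, місто Олег, 01234"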
| vicky2135/lucious | oscar/lib/python2.7/site-packages/faker/providers/address/uk_UA/__init__.py | Python | bsd-3-clause | 5,601 |
from oscar_vat_moss import fields
from oscar.apps.address.abstract_models import AbstractShippingAddress
from oscar.apps.address.abstract_models import AbstractBillingAddress
class ShippingAddress(AbstractShippingAddress):
vatin = fields.vatin()
class BillingAddress(AbstractBillingAddress):
vatin = fields.vatin()
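# Note (added): django-oscar's dynamic model loading expects the customised
# models above to be declared before the trailing wildcard import, which then
# registers any remaining default models of the order app.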
from oscar.apps.order.models import * # noqa
| hastexo/django-oscar-vat_moss | oscar_vat_moss/order/models.py | Python | bsd-3-clause | 375 |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
import numpy as np
import os
TEMPLATE = "templates/template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for an image gradient dataset
"""
def __init__(self, is_inference_db=False, **kwargs):
super(DataIngestion, self).__init__(**kwargs)
self.userdata['is_inference_db'] = is_inference_db
# Used to calculate the gradients later
self.yy, self.xx = np.mgrid[:self.image_height,
:self.image_width].astype('float')
@override
def encode_entry(self, entry):
xslope, yslope = entry
label = np.array([xslope, yslope])
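        # The encoded image is a linear ramp: pixel (x, y) takes the value
        # a*(x - width/2) + b*(y - height/2) + 127.5, so the two requested
        # slopes appear as horizontal/vertical gradients around mid-gray.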
a = xslope * 255 / self.image_width
b = yslope * 255 / self.image_height
image = a * (self.xx - self.image_width/2) + b * (self.yy - self.image_height/2) + 127.5
image = image.astype('uint8')
# convert to 3D tensors
image = image[np.newaxis, ...]
label = label[np.newaxis, np.newaxis, ...]
return image, label
@staticmethod
@override
def get_category():
return "Images"
@staticmethod
@override
def get_id():
return "image-gradients"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
return:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@override
def get_inference_form(self):
return InferenceForm()
@staticmethod
@override
def get_inference_template(form):
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "Gradients"
@override
def itemize_entries(self, stage):
count = 0
if self.userdata['is_inference_db']:
if stage == constants.TEST_DB:
if self.test_image_count:
count = self.test_image_count
else:
return [(self.gradient_x, self.gradient_y)]
else:
if stage == constants.TRAIN_DB:
count = self.train_image_count
elif stage == constants.VAL_DB:
count = self.val_image_count
elif stage == constants.TEST_DB:
count = self.test_image_count
return [np.random.random_sample(2) - 0.5 for i in xrange(count)] if count > 0 else []
| bygreencn/DIGITS | plugins/data/imageGradients/digitsDataPluginImageGradients/data.py | Python | bsd-3-clause | 3,492 |
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import sys
import time
from typing import Any, Union
from pyshark.packet.fields import LayerFieldsContainer, LayerField
from pyshark.packet.packet import Packet as RawPacket
from pktverify.addrs import EthAddr, ExtAddr, Ipv6Addr
from pktverify.bytes import Bytes
from pktverify.consts import VALID_LAYER_NAMES
from pktverify.null_field import nullField
def _auto(v: Union[LayerFieldsContainer, LayerField]):
"""parse the layer field automatically according to its format"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1 or v.get_default_value() is not None, v.fields
dv = v.get_default_value()
rv = v.raw_value
if dv.startswith('0x'):
return int(dv, 16)
try:
if dv == rv:
return int(dv)
elif int(dv) == int(rv, 16):
return int(dv)
except (ValueError, TypeError):
pass
if rv is None:
try:
return int(dv)
except (ValueError, TypeError):
pass
if ':' in dv and '::' not in dv and dv.replace(':', '') == rv: # '88:00', '8800'
return int(rv, 16)
# timestamp: 'Jan 1, 1970 08:00:00.000000000 CST', '0000000000000000'
# convert to seconds from 1970, ignore the nanosecond for now since
# there are integer seconds applied in the test cases
try:
time_str = datetime.datetime.strptime(dv, "%b %d, %Y %H:%M:%S.%f000 %Z")
time_in_sec = time.mktime(time_str.utctimetuple())
return int(time_in_sec)
except (ValueError, TypeError):
pass
try:
int(rv, 16)
return int(dv)
except Exception:
pass
raise ValueError((v, v.get_default_value(), v.raw_value))
def _payload(v: Union[LayerFieldsContainer, LayerField]) -> bytearray:
"""parse the layer field as a bytearray"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
hex_value = v.raw_value
assert len(hex_value) % 2 == 0
s = bytearray()
for i in range(0, len(hex_value), 2):
s.append(int(hex_value[i:i + 2], 16))
return s
def _hex(v: Union[LayerFieldsContainer, LayerField]) -> int:
"""parse the layer field as a hex string"""
    # interpret the field's default value as a hexadecimal string
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return int(v.get_default_value(), 16)
def _raw_hex(v: Union[LayerFieldsContainer, LayerField]) -> int:
"""parse the layer field as a raw hex string"""
    # use the field's integer (hex) value directly
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
iv = v.hex_value
try:
int(v.get_default_value())
assert int(v.get_default_value()) == iv, (v.get_default_value(), v.raw_value)
except ValueError:
pass
try:
int(v.get_default_value(), 16)
assert int(v.get_default_value(), 16) == iv, (v.get_default_value(), v.raw_value)
except ValueError:
pass
return iv
def _raw_hex_rev(v: Union[LayerFieldsContainer, LayerField]) -> int:
"""parse the layer field as a reversed raw hex string"""
# split v into octets and reverse the order
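    # e.g. a raw value of '1122' is reversed per octet to '2211' and parsed as
    # 0x2211; this suits little-endian fields such as wpan.fcf.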
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
rv = v.raw_value
octets = [rv[i:i + 2] for i in range(0, len(rv), 2)]
iv = int(''.join(reversed(octets)), 16)
try:
int(v.get_default_value())
assert int(v.get_default_value()) == iv, (v.get_default_value(), v.raw_value)
except ValueError:
pass
try:
int(v.get_default_value(), 16)
assert int(v.get_default_value(), 16) == iv, (v.get_default_value(), v.raw_value)
except ValueError:
pass
return iv
def _dec(v: Union[LayerFieldsContainer, LayerField]) -> int:
"""parse the layer field as a decimal"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return int(v.get_default_value())
def _float(v: Union[LayerFieldsContainer, LayerField]) -> float:
"""parse the layer field as a float"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return float(v.get_default_value())
def _str(v: Union[LayerFieldsContainer, LayerField]) -> str:
"""parse the layer field as a string"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return str(v.get_default_value())
def _bytes(v: Union[LayerFieldsContainer, LayerField]) -> Bytes:
"""parse the layer field as raw bytes"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return Bytes(v.raw_value)
def _ext_addr(v: Union[LayerFieldsContainer, LayerField]) -> ExtAddr:
"""parse the layer field as an extended address"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return ExtAddr(v.get_default_value())
def _ipv6_addr(v: Union[LayerFieldsContainer, LayerField]) -> Ipv6Addr:
"""parse the layer field as an IPv6 address"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
return Ipv6Addr(v.get_default_value())
def _eth_addr(v: Union[LayerFieldsContainer, LayerField]) -> EthAddr:
"""parse the layer field as an Ethernet MAC address"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1, v.fields
return EthAddr(v.get_default_value())
def _routerid_set(v: Union[LayerFieldsContainer, LayerField]) -> set:
"""parse the layer field as a set of router ids
Notes: the router ID mask in wireshark is a
hexadecimal string separated by ':'
"""
assert not isinstance(v, LayerFieldsContainer) or len(v.fields) == 1
try:
ridmask = str(v.get_default_value())
assert isinstance(ridmask, str), ridmask
ridmask_int = int(ridmask.replace(':', ''), base=16)
rid_set = set()
count = 0
while ridmask_int:
count += 1
if ridmask_int & 1:
rid_set.add(64 - count)
ridmask_int = ridmask_int >> 1
except ValueError:
pass
return rid_set
class _first(object):
"""parse the first layer field"""
def __init__(self, sub_parse):
self._sub_parse = sub_parse
def __call__(self, v: Union[LayerFieldsContainer, LayerField]):
return self._sub_parse(v.fields[0])
class _list(object):
"""parse all layer fields into a list"""
def __init__(self, sub_parse):
self._sub_parse = sub_parse
def __call__(self, v: Union[LayerFieldsContainer, LayerField]):
return [self._sub_parse(f) for f in v.fields]
_LAYER_FIELDS = {
# WPAN
'wpan.fcf': _raw_hex_rev,
'wpan.cmd': _auto,
'wpan.security': _auto,
'wpan.frame_type': _auto,
'wpan.pending': _auto,
'wpan.ack_request': _auto,
'wpan.pan_id_compression': _auto,
'wpan.seqno_suppression': _auto,
'wpan.ie_present': _auto,
'wpan.dst_addr_mode': _auto,
'wpan.version': _auto,
'wpan.src_addr_mode': _auto,
'wpan.dst_pan': _auto,
'wpan.seq_no': _auto,
'wpan.src16': _auto,
'wpan.dst16': _auto,
'wpan.src64': _ext_addr,
'wpan.dst64': _ext_addr,
'wpan.fcs': _raw_hex_rev,
'wpan.fcs_ok': _auto,
'wpan.frame_length': _dec,
'wpan.key_number': _auto,
'wpan.aux_sec.sec_suite': _auto,
'wpan.aux_sec.security_control_field': _auto,
'wpan.aux_sec.sec_level': _auto,
'wpan.aux_sec.key_id_mode': _auto,
'wpan.aux_sec.frame_counter_suppression': _auto,
'wpan.aux_sec.asn_in_nonce': _auto,
'wpan.aux_sec.reserved': _auto,
'wpan.aux_sec.frame_counter': _auto,
'wpan.aux_sec.key_source': _auto,
'wpan.aux_sec.key_index': _auto,
'wpan.aux_sec.hdr': _str,
'wpan.mic': _auto,
'wpan.channel': _auto,
'wpan.header_ie.id': _list(_auto),
'wpan.header_ie.csl.period': _auto,
'wpan.payload_ie.vendor.oui': _auto,
# MLE
'mle.cmd': _auto,
'mle.sec_suite': _hex,
'mle.tlv.type': _list(_dec),
'mle.tlv.len': _list(_dec),
'mle.tlv.mode.receiver_on_idle': _auto,
'mle.tlv.mode.reserved1': _auto,
'mle.tlv.mode.reserved2': _auto,
'mle.tlv.mode.device_type_bit': _auto,
'mle.tlv.mode.network_data': _auto,
'mle.tlv.challenge': _bytes,
'mle.tlv.scan_mask.r': _auto,
'mle.tlv.scan_mask.e': _auto,
'mle.tlv.version': _auto,
'mle.tlv.source_addr': _auto,
'mle.tlv.active_tstamp': _auto,
'mle.tlv.pending_tstamp': _auto,
'mle.tlv.leader_data.partition_id': _auto,
'mle.tlv.leader_data.weighting': _auto,
'mle.tlv.leader_data.data_version': _auto,
'mle.tlv.leader_data.stable_data_version': _auto,
'mle.tlv.leader_data.router_id': _auto,
'mle.tlv.route64.nbr_out': _list(_auto),
'mle.tlv.route64.nbr_in': _list(_auto),
'mle.tlv.route64.id_seq': _auto,
'mle.tlv.route64.id_mask': _routerid_set,
'mle.tlv.route64.cost': _list(_auto),
'mle.tlv.response': _bytes,
'mle.tlv.mle_frm_cntr': _auto,
'mle.tlv.ll_frm_cntr': _auto,
'mle.tlv.link_margin': _auto,
'mle.tlv.conn.sed_dgram_cnt': _auto,
'mle.tlv.conn.sed_buf_size': _auto,
'mle.tlv.conn.lq3': _auto,
'mle.tlv.conn.lq2': _auto,
'mle.tlv.conn.lq1': _auto,
'mle.tlv.conn.leader_cost': _auto,
'mle.tlv.conn.id_seq': _auto,
'mle.tlv.conn.flags.pp': _auto,
'mle.tlv.conn.active_rtrs': _auto,
'mle.tlv.timeout': _auto,
'mle.tlv.addr16': _auto,
'mle.tlv.channel': _auto,
'mle.tlv.addr_reg_iid': _list(_auto),
'mle.tlv.link_enh_ack_flags': _auto,
'mle.tlv.link_forward_series': _list(_auto),
'mle.tlv.link_requested_type_id_flags': _list(_hex),
'mle.tlv.link_sub_tlv': _auto,
'mle.tlv.link_status_sub_tlv': _auto,
'mle.tlv.query_id': _auto,
'mle.tlv.metric_type_id_flags.type': _list(_hex),
'mle.tlv.metric_type_id_flags.metric': _list(_hex),
'mle.tlv.metric_type_id_flags.l': _list(_hex),
'mle.tlv.link_requested_type_id_flags': _bytes,
# IP
'ip.version': _auto,
'ip.src': _str,
'ip.src_host': _str,
'ip.dst': _str,
'ip.dst_host': _str,
'ip.ttl': _auto,
'ip.proto': _auto,
'ip.len': _auto,
'ip.id': _auto,
'ip.host': _list(_str),
'ip.hdr_len': _dec,
'ip.frag_offset': _auto,
'ip.flags.rb': _auto,
'ip.flags.mf': _auto,
'ip.flags.df': _auto,
'ip.dsfield.ecn': _auto,
'ip.dsfield.dscp': _auto,
'ip.checksum.status': _auto,
'ip.addr': _list(_str),
'ip.options.routeralert': _bytes,
'ip.opt.type.number': _auto,
'ip.opt.type.copy': _auto,
'ip.opt.type.class': _auto,
'ip.opt.ra': _auto,
'ip.opt.len': _auto,
# UDP
'udp.stream': _auto,
'udp.srcport': _auto,
'udp.dstport': _auto,
'udp.length': _auto,
'udp.port': _list(_dec),
'udp.checksum.status': _auto,
# IPv6
'ipv6.version': _auto,
'ipv6.src': _ipv6_addr,
'ipv6.src_host': _ipv6_addr,
'ipv6.dst': _ipv6_addr,
'ipv6.dst_host': _ipv6_addr,
'ipv6.addr': _list(_ipv6_addr),
'ipv6.tclass.dscp': _auto,
'ipv6.tclass.ecn': _auto,
'ipv6.flow': _auto,
'ipv6.hlim': _auto,
'ipv6.nxt': _auto,
'ipv6.hopopts.len': _auto,
'ipv6.hopopts.nxt': _auto,
'ipv6.hopopts.len_oct': _dec,
'ipv6.host': _list(_ipv6_addr),
'ipv6.plen': _auto,
'ipv6.opt.type.rest': _list(_auto),
'ipv6.opt.type.change': _list(_auto),
'ipv6.opt.type.action': _list(_auto),
'ipv6.opt.router_alert': _auto,
'ipv6.opt.padn': _str,
'ipv6.opt.length': _list(_auto),
'ipv6.opt.mpl.seed_id': _bytes,
'ipv6.opt.mpl.sequence': _auto,
'ipv6.opt.mpl.flag.v': _auto,
'ipv6.opt.mpl.flag.s': _auto,
'ipv6.opt.mpl.flag.rsv': _auto,
'ipv6.opt.mpl.flag.m': _auto,
# Eth
'eth.src': _eth_addr,
'eth.src_resolved': _eth_addr,
'eth.dst': _eth_addr,
'eth.dst_resolved': _eth_addr,
'eth.type': _auto,
'eth.addr': _list(_eth_addr),
'eth.addr_resolved': _list(_eth_addr),
'eth.ig': _list(_auto),
'eth.lg': _list(_auto),
# 6LOWPAN
'6lowpan.src': _ipv6_addr,
'6lowpan.dst': _ipv6_addr,
'6lowpan.udp.src': _auto,
'6lowpan.udp.dst': _auto,
'6lowpan.udp.checksum': _auto,
'6lowpan.frag.offset': _auto,
'6lowpan.frag.tag': _auto,
'6lowpan.frag.size': _auto,
'6lowpan.pattern': _list(_auto),
'6lowpan.hops': _auto,
'6lowpan.padding': _auto,
'6lowpan.next': _auto,
'6lowpan.flow': _auto,
'6lowpan.ecn': _auto,
'6lowpan.iphc.tf': _auto,
'6lowpan.iphc.m': _auto,
'6lowpan.iphc.nh': _auto,
'6lowpan.iphc.hlim': _auto,
'6lowpan.iphc.cid': _auto,
'6lowpan.iphc.sac': _auto,
'6lowpan.iphc.sam': _auto,
'6lowpan.iphc.dac': _auto,
'6lowpan.iphc.dam': _auto,
'6lowpan.iphc.sci': _auto,
'6lowpan.iphc.dci': _auto,
'6lowpan.iphc.sctx.prefix': _ipv6_addr,
'6lowpan.iphc.dctx.prefix': _ipv6_addr,
'6lowpan.mesh.v': _auto,
'6lowpan.nhc.pattern': _list(_auto),
'6lowpan.nhc.udp.checksum': _auto,
'6lowpan.nhc.udp.ports': _auto,
'6lowpan.nhc.ext.nh': _auto,
'6lowpan.nhc.ext.length': _auto,
'6lowpan.nhc.ext.eid': _auto,
'6lowpan.reassembled.length': _auto,
'6lowpan.fragments': _str,
'6lowpan.fragment.count': _auto,
'6lowpan.mesh.orig16': _auto,
'6lowpan.mesh.hops8': _auto,
'6lowpan.mesh.hops': _auto,
'6lowpan.mesh.f': _auto,
'6lowpan.mesh.dest16': _auto,
# ICMPv6
'icmpv6.type': _first(_auto),
'icmpv6.code': _first(_auto),
'icmpv6.checksum': _first(_auto),
'icmpv6.reserved': _raw_hex,
'icmpv6.resptime': _float,
'icmpv6.resp_to': _auto,
'icmpv6.mldr.nb_mcast_records': _auto,
'icmpv6.nd.ra.cur_hop_limit': _auto,
'icmpv6.nd.ns.target_address': _ipv6_addr,
'icmpv6.nd.na.target_address': _ipv6_addr,
'icmpv6.nd.na.flag.s': _auto,
'icmpv6.nd.na.flag.o': _auto,
'icmpv6.nd.na.flag.r': _auto,
'icmpv6.nd.na.flag.rsv': _auto,
'icmpv6.mldr.mar.record_type': _list(_auto),
'icmpv6.mldr.mar.aux_data_len': _list(_auto),
'icmpv6.mldr.mar.nb_sources': _list(_auto),
'icmpv6.mldr.mar.multicast_address': _list(_ipv6_addr),
'icmpv6.opt.type': _list(_auto),
'icmpv6.opt.nonce': _bytes,
'icmpv6.opt.linkaddr': _eth_addr,
'icmpv6.opt.src_linkaddr': _eth_addr,
'icmpv6.opt.target_linkaddr': _eth_addr,
'icmpv6.opt.route_lifetime': _auto,
'icmpv6.opt.route_info.flag.route_preference': _auto,
'icmpv6.opt.route_info.flag.reserved': _auto,
'icmpv6.opt.prefix.valid_lifetime': _auto,
'icmpv6.opt.prefix.preferred_lifetime': _auto,
'icmpv6.opt.prefix.length': _list(_auto),
'icmpv6.opt.prefix.flag.reserved': _auto,
'icmpv6.opt.prefix.flag.r': _auto,
'icmpv6.opt.prefix.flag.l': _auto,
'icmpv6.opt.prefix.flag.a': _auto,
'icmpv6.opt.length': _list(_auto),
'icmpv6.opt.reserved': _str,
'icmpv6.nd.ra.router_lifetime': _auto,
'icmpv6.nd.ra.retrans_timer': _auto,
'icmpv6.nd.ra.reachable_time': _auto,
'icmpv6.nd.ra.flag.rsv': _auto,
'icmpv6.nd.ra.flag.prf': _auto,
'icmpv6.nd.ra.flag.p': _auto,
'icmpv6.nd.ra.flag.o': _auto,
'icmpv6.nd.ra.flag.m': _auto,
'icmpv6.nd.ra.flag.h': _auto,
'icmpv6.echo.sequence_number': _auto,
'icmpv6.echo.identifier': _auto,
'icmpv6.data.len': _auto,
# COAP
'coap.code': _auto,
'coap.version': _auto,
'coap.type': _auto,
'coap.mid': _auto,
'coap.token_len': _auto,
'coap.token': _auto,
'coap.opt.uri_path': _list(_str),
'coap.opt.name': _list(_str),
'coap.opt.length': _list(_auto),
'coap.opt.uri_path_recon': _str,
'coap.payload': _payload,
'coap.payload_length': _auto,
'coap.payload_desc': _str,
'coap.opt.end_marker': _auto,
'coap.opt.desc': _list(_str),
'coap.opt.delta': _list(_auto),
'coap.response_to': _auto,
'coap.response_time': _float,
# COAP TLVS
'coap.tlv.type': _list(_auto),
'coap.tlv.status': _auto,
'coap.tlv.target_eid': _ipv6_addr,
'coap.tlv.ml_eid': _ext_addr,
'coap.tlv.last_transaction_time': _auto,
'coap.tlv.rloc16': _auto,
'coap.tlv.net_name': _str,
'coap.tlv.ext_mac_addr': _ext_addr,
'coap.tlv.router_mask_assigned': _auto,
'coap.tlv.router_mask_id_seq': _auto,
# dtls
'dtls.handshake.type': _list(_auto),
'dtls.handshake.cookie': _auto,
'dtls.record.content_type': _list(_auto),
'dtls.alert_message.desc': _auto,
# thread beacon
'thread_bcn.protocol': _auto,
'thread_bcn.version': _auto,
'thread_bcn.network_name': _str,
'thread_bcn.epid': _ext_addr,
# thread_address
'thread_address.tlv.len': _list(_auto),
'thread_address.tlv.type': _list(_auto),
'thread_address.tlv.status': _auto,
'thread_address.tlv.target_eid': _ipv6_addr,
'thread_address.tlv.ext_mac_addr': _ext_addr,
'thread_address.tlv.router_mask_id_seq': _auto,
'thread_address.tlv.router_mask_assigned': _bytes,
'thread_address.tlv.rloc16': _hex,
'thread_address.tlv.ml_eid': _ext_addr,
# thread bl
'thread_bl.tlv.type': _list(_auto),
'thread_bl.tlv.len': _list(_auto),
'thread_bl.tlv.target_eid': _ipv6_addr,
'thread_bl.tlv.ml_eid': _ext_addr,
'thread_bl.tlv.last_transaction_time': _auto,
'thread_bl.tlv.timeout': _auto,
# THEAD NM
'thread_nm.tlv.type': _list(_auto),
'thread_nm.tlv.ml_eid': _ext_addr,
'thread_nm.tlv.target_eid': _ipv6_addr,
'thread_nm.tlv.status': _auto,
'thread_nm.tlv.timeout': _auto,
# thread_meshcop is not a real layer
'thread_meshcop.len_size_mismatch': _str,
'thread_meshcop.tlv.type': _list(_auto),
'thread_meshcop.tlv.len8': _list(_auto),
'thread_meshcop.tlv.net_name': _list(_str), # from thread_bl
'thread_meshcop.tlv.commissioner_id': _str,
    'thread_meshcop.tlv.commissioner_sess_id': _auto,  # from mle
    'thread_meshcop.tlv.channel_page': _auto,  # from ble
    'thread_meshcop.tlv.channel': _list(_auto),  # from ble
    'thread_meshcop.tlv.chan_mask': _str,  # from ble
'thread_meshcop.tlv.chan_mask_page': _auto,
'thread_meshcop.tlv.chan_mask_len': _auto,
'thread_meshcop.tlv.chan_mask_mask': _bytes,
'thread_meshcop.tlv.discovery_req_ver': _auto,
'thread_meshcop.tlv.discovery_rsp_ver': _auto,
'thread_meshcop.tlv.discovery_rsp_n': _auto,
'thread_meshcop.tlv.energy_list': _list(_auto),
'thread_meshcop.tlv.pan_id': _list(_auto),
'thread_meshcop.tlv.xpan_id': _bytes,
'thread_meshcop.tlv.ml_prefix': _bytes,
'thread_meshcop.tlv.master_key': _bytes,
'thread_meshcop.tlv.pskc': _bytes,
'thread_meshcop.tlv.sec_policy_rot': _auto,
'thread_meshcop.tlv.sec_policy_o': _auto,
'thread_meshcop.tlv.sec_policy_n': _auto,
'thread_meshcop.tlv.sec_policy_r': _auto,
'thread_meshcop.tlv.sec_policy_c': _auto,
'thread_meshcop.tlv.sec_policy_b': _auto,
'thread_meshcop.tlv.state': _auto,
'thread_meshcop.tlv.steering_data': _bytes,
'thread_meshcop.tlv.unknown': _bytes,
'thread_meshcop.tlv.udp_port': _list(_auto),
'thread_meshcop.tlv.ba_locator': _auto,
'thread_meshcop.tlv.jr_locator': _auto,
'thread_meshcop.tlv.active_tstamp': _auto,
'thread_meshcop.tlv.pending_tstamp': _auto,
'thread_meshcop.tlv.delay_timer': _auto,
'thread_meshcop.tlv.ipv6_addr': _list(_ipv6_addr),
# THREAD NWD
'thread_nwd.tlv.type': _list(_auto),
'thread_nwd.tlv.len': _list(_auto),
'thread_nwd.tlv.stable': _list(_auto),
'thread_nwd.tlv.service.t': _auto,
'thread_nwd.tlv.service.s_id': _auto,
'thread_nwd.tlv.service.s_data_len': _auto,
'thread_nwd.tlv.service.s_data.seqno': _auto,
'thread_nwd.tlv.service.s_data.rrdelay': _auto,
'thread_nwd.tlv.service.s_data.mlrtimeout': _auto,
'thread_nwd.tlv.server_16': _list(_auto),
'thread_nwd.tlv.border_router_16': _list(_auto),
'thread_nwd.tlv.sub_tlvs': _list(_str),
# TODO: support thread_nwd.tlv.prefix.length and thread_nwd.tlv.prefix.domain_id
'thread_nwd.tlv.prefix': _list(_ipv6_addr),
'thread_nwd.tlv.border_router.pref': _auto,
'thread_nwd.tlv.border_router.flag.s': _list(_auto),
'thread_nwd.tlv.border_router.flag.r': _list(_auto),
'thread_nwd.tlv.border_router.flag.p': _list(_auto),
'thread_nwd.tlv.border_router.flag.o': _list(_auto),
'thread_nwd.tlv.border_router.flag.n': _list(_auto),
'thread_nwd.tlv.border_router.flag.dp': _list(_auto),
'thread_nwd.tlv.border_router.flag.d': _list(_auto),
'thread_nwd.tlv.border_router.flag.c': _list(_auto),
'thread_nwd.tlv.6co.flag.reserved': _auto,
'thread_nwd.tlv.6co.flag.cid': _auto,
'thread_nwd.tlv.6co.flag.c': _list(_auto),
'thread_nwd.tlv.6co.context_length': _auto,
# Thread Diagnostic
'thread_diagnostic.tlv.type': _list(_auto),
'thread_diagnostic.tlv.len8': _list(_auto),
'thread_diagnostic.tlv.general': _list(_str),
# DNS
'dns.resp.ttl': _auto,
'dns.flags.response': _auto,
}
_layer_containers = set()
for key in _LAYER_FIELDS:
assert key.strip() == key and ' ' not in key, key
secs = key.split('.')
assert len(secs) >= 2
assert secs[0] in VALID_LAYER_NAMES, secs[0]
for i in range(len(secs) - 2):
path = secs[0] + '.' + '.'.join(secs[1:i + 2])
        assert path not in _LAYER_FIELDS, '%s cannot be both a field and a path' % path
_layer_containers.add(path)
def is_layer_field(uri: str) -> bool:
"""
    Return whether the URI is a valid layer field.
:param uri: The layer field URI.
"""
return uri in _LAYER_FIELDS
def is_layer_field_container(uri: str) -> bool:
"""
    Return whether the URI is a valid layer field container.
:param uri: The layer field container URI.
"""
return uri in _layer_containers
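# Illustrative sketch (not executed): a full URI registered in _LAYER_FIELDS is a
# "field", while any strict prefix below the layer name is a "field container":
#   is_layer_field('icmpv6.echo.identifier')             -> True
#   is_layer_field_container('icmpv6.echo')               -> True
#   is_layer_field_container('icmpv6.echo.identifier')    -> False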
def get_layer_field(packet: RawPacket, field_uri: str) -> Any:
"""
Get a given layer field from the packet.
:param packet: The packet.
:param field_uri: The layer field URI.
:return: The specified layer field.
"""
assert isinstance(packet, RawPacket)
secs = field_uri.split('.')
layer_depth = 0
layer_name = secs[0]
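    # A layer name ending in 'inner' (e.g. a hypothetical 'ipinner.src') selects
    # layer index 1 instead of 0, i.e. the second (encapsulated) occurrence of
    # that layer in the packet.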
if layer_name.endswith('inner'):
layer_name = layer_name[:-len('inner')]
field_uri = '.'.join([layer_name] + secs[1:])
layer_depth = 1
if is_layer_field(field_uri):
candidate_layers = _get_candidate_layers(packet, layer_name)
for layers in candidate_layers:
if layer_depth >= len(layers):
continue
layer = layers[layer_depth]
v = layer.get_field(field_uri)
if v is not None:
try:
v = _LAYER_FIELDS[field_uri](v)
print("[%s = %r] " % (field_uri, v), file=sys.stderr)
return v
except Exception as ex:
                    raise ValueError('cannot parse field %s = %r' % (field_uri,
(v.get_default_value(), v.raw_value))) from ex
print("[%s = %s] " % (field_uri, "null"), file=sys.stderr)
return nullField
elif is_layer_field_container(field_uri):
from pktverify.layer_fields_container import LayerFieldsContainer
return LayerFieldsContainer(packet, field_uri)
else:
raise NotImplementedError('Field %s is not valid, please add it to `_LAYER_FIELDS`' % field_uri)
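# Typical use (sketch; `pkt` is assumed to be a parsed RawPacket):
#   get_layer_field(pkt, 'ipv6.src')    -> parsed source address, or nullField
#   get_layer_field(pkt, 'icmpv6.opt')  -> LayerFieldsContainer for its sub-fields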
def check_layer_field_exists(packet, field_uri):
"""
Check if a given layer field URI exists in the packet.
:param packet: The packet to check.
:param field_uri: The layer field URI.
:return: Whether the layer field URI exists in the packet.
"""
assert isinstance(packet, RawPacket)
secs = field_uri.split('.')
layer_name = secs[0]
if not is_layer_field(field_uri) and not is_layer_field_container(field_uri):
        raise NotImplementedError('%s is neither a field nor a field container' % field_uri)
candidate_layers = _get_candidate_layers(packet, layer_name)
for layers in candidate_layers:
for layer in layers:
for k, v in layer._all_fields.items():
if k == field_uri or k.startswith(field_uri + '.'):
return True
return False
def _get_candidate_layers(packet, layer_name):
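    # Some Thread "layers" (e.g. thread_meshcop) are not standalone dissector
    # layers; their TLVs may be dissected under other layers, so map the requested
    # name to every layer that can carry its fields.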
if layer_name == 'thread_meshcop':
candidate_layer_names = ['thread_meshcop', 'mle', 'coap', 'thread_bl', 'thread_nm']
elif layer_name == 'thread_nwd':
candidate_layer_names = ['mle', 'thread_address', 'thread_diagnostic']
elif layer_name == 'wpan':
candidate_layer_names = ['wpan', 'mle']
elif layer_name == 'ip':
candidate_layer_names = ['ip', 'ipv6']
elif layer_name == 'thread_bcn':
candidate_layer_names = ['thread_bcn']
else:
candidate_layer_names = [layer_name]
layers = []
for ln in candidate_layer_names:
if hasattr(packet, ln):
layers.append(packet.get_multiple_layers(ln))
return layers
| jwhui/openthread | tests/scripts/thread-cert/pktverify/layer_fields.py | Python | bsd-3-clause | 26,646 |
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.core.checks import Error, Warning as DjangoWarning
from django.db import models
from django.db.models.fields.related import ForeignObject
from django.test import ignore_warnings
from django.test.testcases import SimpleTestCase, skipIfDBFeature
from django.test.utils import isolate_apps, override_settings
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.version import get_docs_version
@isolate_apps('invalid_models_tests')
class RelativeFieldTests(SimpleTestCase):
def test_valid_foreign_key_without_accessor(self):
class Target(models.Model):
# There would be a clash if Model.field installed an accessor.
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, related_name='+')
field = Model._meta.get_field('field')
errors = field.check()
self.assertEqual(errors, [])
@ignore_warnings(category=RemovedInDjango20Warning)
def test_valid_foreign_key_without_on_delete(self):
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, related_name='+')
def test_foreign_key_without_on_delete_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, related_name='+')
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
'on_delete will be a required arg for ForeignKey in Django '
'2.0. Set it to models.CASCADE on models and in existing '
'migrations if you want to maintain the current default '
'behavior. See https://docs.djangoproject.com/en/%s/ref/models/fields/'
'#django.db.models.ForeignKey.on_delete' % get_docs_version(),
)
def test_foreign_key_to_field_as_arg(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, 'id')
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"The signature for ForeignKey will change in Django 2.0. "
"Pass to_field='id' as a kwarg instead of as an arg."
)
def test_one_to_one_field_without_on_delete_warning(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.OneToOneField(Target, related_name='+')
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
'on_delete will be a required arg for OneToOneField in Django '
'2.0. Set it to models.CASCADE on models and in existing '
'migrations if you want to maintain the current default '
'behavior. See https://docs.djangoproject.com/en/%s/ref/models/fields/'
'#django.db.models.ForeignKey.on_delete' % get_docs_version(),
)
def test_one_to_one_field_to_field_as_arg(self):
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
class Target(models.Model):
model = models.IntegerField()
class Model(models.Model):
field = models.OneToOneField(Target, 'id')
self.assertEqual(len(warns), 1)
self.assertEqual(
str(warns[0].message),
"The signature for OneToOneField will change in Django 2.0. "
"Pass to_field='id' as a kwarg instead of as an arg."
)
def test_foreign_key_to_missing_model(self):
# Model names are resolved when a model is being created, so we cannot
# test relative fields in isolation and we need to attach them to a
# model.
class Model(models.Model):
foreign_key = models.ForeignKey('Rel1', models.CASCADE)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
("Field defines a relation with model 'Rel1', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_foreign_key_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('OtherModel', models.CASCADE)
field = Model._meta.get_field('foreign_key')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_to_missing_model(self):
class Model(models.Model):
m2m = models.ManyToManyField("Rel2")
field = Model._meta.get_field('m2m')
errors = field.check(from_model=Model)
expected = [
Error(
("Field defines a relation with model 'Rel2', "
"which is either not installed, or is abstract."),
hint=None,
obj=field,
id='fields.E300',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_many_to_many_to_isolate_apps_model(self):
"""
#25723 - Referenced model registration lookup should be run against the
field's model registry.
"""
class OtherModel(models.Model):
pass
class Model(models.Model):
m2m = models.ManyToManyField('OtherModel')
field = Model._meta.get_field('m2m')
self.assertEqual(field.check(from_model=Model), [])
def test_many_to_many_with_useless_options(self):
class Model(models.Model):
name = models.CharField(max_length=20)
class ModelM2M(models.Model):
m2m = models.ManyToManyField(Model, null=True, validators=[''])
errors = ModelM2M.check()
field = ModelM2M._meta.get_field('m2m')
expected = [
DjangoWarning(
'null has no effect on ManyToManyField.',
hint=None,
obj=field,
id='fields.W340',
)
]
expected.append(
DjangoWarning(
'ManyToManyField does not support validators.',
hint=None,
obj=field,
id='fields.W341',
)
)
self.assertEqual(errors, expected)
def test_ambiguous_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
field = models.ManyToManyField('Person',
through="AmbiguousRelationship", related_name='tertiary')
class AmbiguousRelationship(models.Model):
            # Too many foreign keys to Person.
first_person = models.ForeignKey(Person, models.CASCADE, related_name="first")
second_person = models.ForeignKey(Person, models.CASCADE, related_name="second")
second_model = models.ForeignKey(Group, models.CASCADE)
field = Group._meta.get_field('field')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.field', but it has more than one "
"foreign key to 'Person', which is ambiguous. You must specify "
"which foreign key Django should use via the through_fields "
"keyword argument."),
hint=('If you want to create a recursive relationship, use '
'ForeignKey("self", symmetrical=False, '
'through="AmbiguousRelationship").'),
obj=field,
id='fields.E335',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_with_foreign_key_to_wrong_model(self):
class WrongModel(models.Model):
pass
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
wrong_foreign_key = models.ForeignKey(WrongModel, models.CASCADE)
            # The second foreign key should point to the Group model, not WrongModel.
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not "
"have a foreign key to 'Group' or 'Person'."),
hint=None,
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_relationship_model_missing_foreign_key(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="InvalidRelationship")
class InvalidRelationship(models.Model):
group = models.ForeignKey(Group, models.CASCADE)
# No foreign key to Person
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Group.members', but it does not have "
"a foreign key to 'Group' or 'Person'."),
hint=None,
obj=InvalidRelationship,
id='fields.E336',
),
]
self.assertEqual(errors, expected)
def test_missing_relationship_model(self):
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person',
through="MissingM2MModel")
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
("Field specifies a many-to-many relation through model "
"'MissingM2MModel', which has not been installed."),
hint=None,
obj=field,
id='fields.E331',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
def test_many_to_many_through_isolate_apps_model(self):
"""
#25723 - Through model registration lookup should be run against the
field's model registry.
"""
class GroupMember(models.Model):
person = models.ForeignKey('Person', models.CASCADE)
group = models.ForeignKey('Group', models.CASCADE)
class Person(models.Model):
pass
class Group(models.Model):
members = models.ManyToManyField('Person', through='GroupMember')
field = Group._meta.get_field('members')
self.assertEqual(field.check(from_model=Group), [])
def test_symmetrical_self_referential_field(self):
class Person(models.Model):
            # Implicit symmetrical=True (the default for a self-referential M2M).
friends = models.ManyToManyField('self', through="Relationship")
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_too_many_foreign_keys_in_self_referential_model(self):
class Person(models.Model):
friends = models.ManyToManyField('self',
through="InvalidRelationship", symmetrical=False)
class InvalidRelationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set_2")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set_2")
third = models.ForeignKey(Person, models.CASCADE, related_name="too_many_by_far")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
("The model is used as an intermediate model by "
"'invalid_models_tests.Person.friends', but it has more than two "
"foreign keys to 'Person', which is ambiguous. You must specify "
"which two foreign keys Django should use via the through_fields "
"keyword argument."),
hint='Use through_fields to specify which two foreign keys Django should use.',
obj=InvalidRelationship,
id='fields.E333',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table(self):
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
through="Relationship", symmetrical=True)
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_symmetric_self_reference_with_intermediate_table_and_through_fields(self):
"""
Using through_fields in a m2m with an intermediate model shouldn't
mask its incompatibility with symmetry.
"""
class Person(models.Model):
# Explicit symmetrical=True.
friends = models.ManyToManyField('self',
symmetrical=True,
through="Relationship",
through_fields=('first', 'second'))
class Relationship(models.Model):
first = models.ForeignKey(Person, models.CASCADE, related_name="rel_from_set")
second = models.ForeignKey(Person, models.CASCADE, related_name="rel_to_set")
referee = models.ForeignKey(Person, models.CASCADE, related_name="referred")
field = Person._meta.get_field('friends')
errors = field.check(from_model=Person)
expected = [
Error(
'Many-to-many fields with intermediate tables must not be symmetrical.',
hint=None,
obj=field,
id='fields.E332',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_foreign_key = models.ForeignKey('AbstractModel', models.CASCADE)
rel_class_foreign_key = models.ForeignKey(AbstractModel, models.CASCADE)
fields = [
Model._meta.get_field('rel_string_foreign_key'),
Model._meta.get_field('rel_class_foreign_key'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
errors = field.check()
self.assertEqual(errors, [expected_error])
def test_m2m_to_abstract_model(self):
class AbstractModel(models.Model):
class Meta:
abstract = True
class Model(models.Model):
rel_string_m2m = models.ManyToManyField('AbstractModel')
rel_class_m2m = models.ManyToManyField(AbstractModel)
fields = [
Model._meta.get_field('rel_string_m2m'),
Model._meta.get_field('rel_class_m2m'),
]
expected_error = Error(
"Field defines a relation with model 'AbstractModel', "
"which is either not installed, or is abstract.",
id='fields.E300',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
def test_unique_m2m(self):
class Person(models.Model):
name = models.CharField(max_length=5)
class Group(models.Model):
members = models.ManyToManyField('Person', unique=True)
field = Group._meta.get_field('members')
errors = field.check(from_model=Group)
expected = [
Error(
'ManyToManyFields cannot be unique.',
hint=None,
obj=field,
id='fields.E330',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field(self):
class Target(models.Model):
bad = models.IntegerField() # No unique=True
class Model(models.Model):
foreign_key = models.ForeignKey('Target', models.CASCADE, to_field='bad')
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
hint=None,
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_key_to_non_unique_field_under_explicit_model(self):
class Target(models.Model):
bad = models.IntegerField()
class Model(models.Model):
field = models.ForeignKey(Target, models.CASCADE, to_field='bad')
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
"'Target.bad' must set unique=True because it is referenced by a foreign key.",
hint=None,
obj=field,
id='fields.E311',
),
]
self.assertEqual(errors, expected)
def test_foreign_object_to_non_unique_fields(self):
class Person(models.Model):
            # Note that neither field is unique.
country_id = models.IntegerField()
city_id = models.IntegerField()
class MMembership(models.Model):
person_country_id = models.IntegerField()
person_city_id = models.IntegerField()
person = models.ForeignObject(Person,
on_delete=models.CASCADE,
from_fields=['person_country_id', 'person_city_id'],
to_fields=['country_id', 'city_id'])
field = MMembership._meta.get_field('person')
errors = field.check()
expected = [
Error(
"No subset of the fields 'country_id', 'city_id' on model 'Person' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
)
]
self.assertEqual(errors, expected)
def test_on_delete_set_null_on_non_nullable_field(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_NULL)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_NULL, but cannot be null.',
hint='Set null=True argument on the field, or change the on_delete rule.',
obj=field,
id='fields.E320',
),
]
self.assertEqual(errors, expected)
def test_on_delete_set_default_without_default_value(self):
class Person(models.Model):
pass
class Model(models.Model):
foreign_key = models.ForeignKey('Person', models.SET_DEFAULT)
field = Model._meta.get_field('foreign_key')
errors = field.check()
expected = [
Error(
'Field specifies on_delete=SET_DEFAULT, but has no default value.',
hint='Set a default value, or change the on_delete rule.',
obj=field,
id='fields.E321',
),
]
self.assertEqual(errors, expected)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_nullable_primary_key(self):
class Model(models.Model):
field = models.IntegerField(primary_key=True, null=True)
field = Model._meta.get_field('field')
errors = field.check()
expected = [
Error(
'Primary keys must not have null=True.',
hint='Set null=False on the field, or remove primary_key=True argument.',
obj=field,
id='fields.E007',
),
]
self.assertEqual(errors, expected)
def test_not_swapped_model(self):
class SwappableModel(models.Model):
# A model that can be, but isn't swapped out. References to this
# model should *not* raise any validation error.
class Meta:
swappable = 'TEST_SWAPPABLE_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappableModel,
models.CASCADE,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappableModel',
models.CASCADE,
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappableModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappableModel',
related_name='implicit_m2m')
explicit_fk = Model._meta.get_field('explicit_fk')
self.assertEqual(explicit_fk.check(), [])
implicit_fk = Model._meta.get_field('implicit_fk')
self.assertEqual(implicit_fk.check(), [])
explicit_m2m = Model._meta.get_field('explicit_m2m')
self.assertEqual(explicit_m2m.check(from_model=Model), [])
implicit_m2m = Model._meta.get_field('implicit_m2m')
self.assertEqual(implicit_m2m.check(from_model=Model), [])
@override_settings(TEST_SWAPPED_MODEL='invalid_models_tests.Replacement')
def test_referencing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL'
class Model(models.Model):
explicit_fk = models.ForeignKey(SwappedModel,
models.CASCADE,
related_name='explicit_fk')
implicit_fk = models.ForeignKey('invalid_models_tests.SwappedModel',
models.CASCADE,
related_name='implicit_fk')
explicit_m2m = models.ManyToManyField(SwappedModel,
related_name='explicit_m2m')
implicit_m2m = models.ManyToManyField(
'invalid_models_tests.SwappedModel',
related_name='implicit_m2m')
fields = [
Model._meta.get_field('explicit_fk'),
Model._meta.get_field('implicit_fk'),
Model._meta.get_field('explicit_m2m'),
Model._meta.get_field('implicit_m2m'),
]
expected_error = Error(
("Field defines a relation with the model "
"'invalid_models_tests.SwappedModel', which has been swapped out."),
hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
id='fields.E301',
)
for field in fields:
expected_error.obj = field
errors = field.check(from_model=Model)
self.assertEqual(errors, [expected_error])
def test_related_field_has_invalid_related_name(self):
digit = 0
illegal_non_alphanumeric = '!'
whitespace = '\t'
invalid_related_names = [
'%s_begins_with_digit' % digit,
'%s_begins_with_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'%s_begins_with_whitespace' % whitespace,
'contains_%s_illegal_non_alphanumeric' % illegal_non_alphanumeric,
'contains_%s_whitespace' % whitespace,
'ends_with_with_illegal_non_alphanumeric_%s' % illegal_non_alphanumeric,
'ends_with_whitespace_%s' % whitespace,
'with', # a Python keyword
'related_name\n',
'',
]
# Python 2 crashes on non-ASCII strings.
if six.PY3:
            invalid_related_names.append('，')
class Parent(models.Model):
pass
for invalid_related_name in invalid_related_names:
Child = type(str('Child_%s') % str(invalid_related_name), (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=invalid_related_name),
'__module__': Parent.__module__,
})
field = Child._meta.get_field('parent')
errors = Child.check()
expected = [
Error(
"The name '%s' is invalid related_name for field Child_%s.parent"
% (invalid_related_name, invalid_related_name),
hint="Related name must be a valid Python identifier or end with a '+'",
obj=field,
id='fields.E306',
),
]
self.assertEqual(errors, expected)
def test_related_field_has_valid_related_name(self):
lowercase = 'a'
uppercase = 'A'
digit = 0
related_names = [
'%s_starts_with_lowercase' % lowercase,
            '%s_starts_with_uppercase' % uppercase,
'_starts_with_underscore',
'contains_%s_digit' % digit,
'ends_with_plus+',
'_',
'_+',
'+',
]
# Python 2 crashes on non-ASCII strings.
if six.PY3:
related_names.extend(['試', '試驗+'])
class Parent(models.Model):
pass
for related_name in related_names:
Child = type(str('Child_%s') % str(related_name), (models.Model,), {
'parent': models.ForeignKey('Parent', models.CASCADE, related_name=related_name),
'__module__': Parent.__module__,
})
errors = Child.check()
self.assertFalse(errors)
@isolate_apps('invalid_models_tests')
class AccessorClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_accessor_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_accessor_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_accessor_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_accessor_clash(self, target, relative):
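        # Shared helper: Target defines a field named 'model_set', which is exactly
        # the default reverse accessor that Model.rel would add, forcing fields.E302.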
class Another(models.Model):
pass
class Target(models.Model):
model_set = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.model_set'.",
hint=("Rename field 'Target.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_clash_between_accessors(self):
class Target(models.Model):
pass
class Model(models.Model):
foreign = models.ForeignKey(Target, models.CASCADE)
m2m = models.ManyToManyField(Target)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with reverse accessor for 'Model.m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.foreign' or 'Model.m2m'."),
obj=Model._meta.get_field('foreign'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.m2m' clashes with reverse accessor for 'Model.foreign'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m' or 'Model.foreign'."),
obj=Model._meta.get_field('m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_m2m_to_m2m_with_inheritance(self):
""" Ref #22047. """
class Target(models.Model):
pass
class Model(models.Model):
children = models.ManyToManyField('Child',
related_name="m2m_clash", related_query_name="no_clash")
class Parent(models.Model):
m2m_clash = models.ManyToManyField('Target')
class Child(Parent):
pass
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.children' clashes with field name 'Child.m2m_clash'.",
hint=("Rename field 'Child.m2m_clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.children'."),
obj=Model._meta.get_field('children'),
id='fields.E302',
)
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ReverseQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE))
def test_fk_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE))
def test_m2m_to_integer(self):
self._test_reverse_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target'))
def test_m2m_to_fk(self):
self._test_reverse_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target'))
def test_m2m_to_m2m(self):
self._test_reverse_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target'))
def _test_reverse_query_name_clash(self, target, relative):
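        # Shared helper: Target defines a field named 'model', which collides with
        # the default reverse query name generated for Model.rel, forcing fields.E303.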
class Another(models.Model):
pass
class Target(models.Model):
model = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.model'.",
hint=("Rename field 'Target.model', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ExplicitRelatedNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target', models.CASCADE, related_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target', related_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target', related_name='clash'))
def _test_explicit_related_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ExplicitRelatedQueryNameClashTests(SimpleTestCase):
def test_fk_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ForeignKey('Target',
models.CASCADE,
related_query_name='clash'))
def test_fk_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ForeignKey('Target',
models.CASCADE,
related_query_name='clash'))
def test_fk_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ForeignKey('Target',
models.CASCADE,
related_query_name='clash'))
def test_m2m_to_integer(self):
self._test_explicit_related_query_name_clash(
target=models.IntegerField(),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_fk(self):
self._test_explicit_related_query_name_clash(
target=models.ForeignKey('Another', models.CASCADE),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def test_m2m_to_m2m(self):
self._test_explicit_related_query_name_clash(
target=models.ManyToManyField('Another'),
relative=models.ManyToManyField('Target',
related_query_name='clash'))
def _test_explicit_related_query_name_clash(self, target, relative):
class Another(models.Model):
pass
class Target(models.Model):
clash = target
class Model(models.Model):
rel = relative
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.rel' clashes with field name 'Target.clash'.",
hint=("Rename field 'Target.clash', or add/change a related_name "
"argument to the definition for field 'Model.rel'."),
obj=Model._meta.get_field('rel'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class SelfReferentialM2MClashTests(SimpleTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField('self', symmetrical=False)
second_m2m = models.ManyToManyField('self', symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.first_m2m' clashes with reverse accessor for 'Model.second_m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.first_m2m' or 'Model.second_m2m'."),
obj=Model._meta.get_field('first_m2m'),
id='fields.E304',
),
Error(
"Reverse accessor for 'Model.second_m2m' clashes with reverse accessor for 'Model.first_m2m'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.second_m2m' or 'Model.first_m2m'."),
obj=Model._meta.get_field('second_m2m'),
id='fields.E304',
),
]
self.assertEqual(errors, expected)
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=("Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=("Rename field 'Model.model', or add/change a related_name "
"argument to the definition for field 'Model.model'."),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField("self",
symmetrical=False, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."),
obj=Model._meta.get_field('m2m'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change a related_name "
"argument to the definition for field 'Model.m2m'."),
obj=Model._meta.get_field('m2m'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField("self",
symmetrical=False, related_name='first_accessor')
second = models.ManyToManyField("self",
symmetrical=False, related_name='second_accessor')
errors = Model.check()
self.assertEqual(errors, [])
@isolate_apps('invalid_models_tests')
class SelfReferentialFKClashTests(SimpleTestCase):
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ForeignKey("Model", models.CASCADE)
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.model_set' clashes with field name 'Model.model_set'.",
hint=("Rename field 'Model.model_set', or add/change "
"a related_name argument to the definition "
"for field 'Model.model_set'."),
obj=Model._meta.get_field('model_set'),
id='fields.E302',
),
]
self.assertEqual(errors, expected)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ForeignKey("Model", models.CASCADE)
errors = Model.check()
expected = [
Error(
"Reverse query name for 'Model.model' clashes with field name 'Model.model'.",
hint=("Rename field 'Model.model', or add/change "
"a related_name argument to the definition "
"for field 'Model.model'."),
obj=Model._meta.get_field('model'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.CharField(max_length=10)
foreign = models.ForeignKey("Model", models.CASCADE, related_name='clash')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.foreign'."),
obj=Model._meta.get_field('foreign'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign' clashes with field name 'Model.clash'.",
hint=("Rename field 'Model.clash', or add/change "
"a related_name argument to the definition "
"for field 'Model.foreign'."),
obj=Model._meta.get_field('foreign'),
id='fields.E303',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ComplexClashTests(SimpleTestCase):
# New tests should not be included here, because this is a single,
# self-contained sanity check, not a test of everything.
def test_complex_clash(self):
class Target(models.Model):
tgt_safe = models.CharField(max_length=10)
clash = models.CharField(max_length=10)
model = models.CharField(max_length=10)
clash1_set = models.CharField(max_length=10)
class Model(models.Model):
src_safe = models.CharField(max_length=10)
foreign_1 = models.ForeignKey(Target, models.CASCADE, related_name='id')
foreign_2 = models.ForeignKey(Target, models.CASCADE, related_name='src_safe')
m2m_1 = models.ManyToManyField(Target, related_name='id')
m2m_2 = models.ManyToManyField(Target, related_name='src_safe')
errors = Model.check()
expected = [
Error(
"Reverse accessor for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.foreign_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.foreign_1' clashes with reverse accessor for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_1' clashes with reverse query name for 'Model.m2m_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_1' or 'Model.m2m_1'."),
obj=Model._meta.get_field('foreign_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.foreign_2' clashes with reverse accessor for 'Model.m2m_2'.",
hint=("Add or change a related_name argument "
"to the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.foreign_2' clashes with reverse query name for 'Model.m2m_2'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.foreign_2' or 'Model.m2m_2'."),
obj=Model._meta.get_field('foreign_2'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E302',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with field name 'Target.id'.",
hint=("Rename field 'Target.id', or add/change a related_name "
"argument to the definition for field 'Model.m2m_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E303',
),
Error(
"Reverse accessor for 'Model.m2m_1' clashes with reverse accessor for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_1' clashes with reverse query name for 'Model.foreign_1'.",
hint=("Add or change a related_name argument to "
"the definition for 'Model.m2m_1' or 'Model.foreign_1'."),
obj=Model._meta.get_field('m2m_1'),
id='fields.E305',
),
Error(
"Reverse accessor for 'Model.m2m_2' clashes with reverse accessor for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E304',
),
Error(
"Reverse query name for 'Model.m2m_2' clashes with reverse query name for 'Model.foreign_2'.",
hint=("Add or change a related_name argument to the definition "
"for 'Model.m2m_2' or 'Model.foreign_2'."),
obj=Model._meta.get_field('m2m_2'),
id='fields.E305',
),
]
self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class M2mThroughFieldsTests(SimpleTestCase):
def test_m2m_field_argument_validation(self):
"""
Tests that ManyToManyField accepts the ``through_fields`` kwarg
only if an intermediary table is specified.
"""
class Fan(models.Model):
pass
with self.assertRaisesMessage(ValueError, 'Cannot specify through_fields without a through model'):
models.ManyToManyField(Fan, through_fields=('f1', 'f2'))
def test_invalid_order(self):
"""
Tests that mixing up the order of link fields to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=('invitee', 'event'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
("'Invitation.invitee' is not a foreign key to 'Event'."),
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E339'),
Error(
("'Invitation.event' is not a foreign key to 'Fan'."),
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E339'),
]
self.assertEqual(expected, errors)
def test_invalid_field(self):
"""
Tests that providing invalid field names to ManyToManyField.through_fields
triggers validation errors.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(
Fan,
through='Invitation',
through_fields=('invalid_field_1', 'invalid_field_2'),
)
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_1'.",
hint="Did you mean one of the following foreign keys to 'Event': event?",
obj=field,
id='fields.E338'),
Error(
"The intermediary model 'invalid_models_tests.Invitation' has no field 'invalid_field_2'.",
hint="Did you mean one of the following foreign keys to 'Fan': invitee, inviter?",
obj=field,
id='fields.E338'),
]
self.assertEqual(expected, errors)
def test_explicit_field_names(self):
"""
Tests that if ``through_fields`` kwarg is given, it must specify both
link fields of the intermediary table.
"""
class Fan(models.Model):
pass
class Event(models.Model):
invitees = models.ManyToManyField(Fan, through='Invitation', through_fields=(None, 'invitee'))
class Invitation(models.Model):
event = models.ForeignKey(Event, models.CASCADE)
invitee = models.ForeignKey(Fan, models.CASCADE)
inviter = models.ForeignKey(Fan, models.CASCADE, related_name='+')
field = Event._meta.get_field('invitees')
errors = field.check(from_model=Event)
expected = [
Error(
"Field specifies 'through_fields' but does not provide the names "
"of the two link fields that should be used for the relation "
"through model 'invalid_models_tests.Invitation'.",
hint=("Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"),
obj=field,
id='fields.E337')]
self.assertEqual(expected, errors)
def test_superset_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b'),
to_fields=('a', 'b'),
related_name='children',
)
field = Child._meta.get_field('parent')
errors = field.check(from_model=Child)
expected = [
Error(
"No subset of the fields 'a', 'b' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
]
self.assertEqual(expected, errors)
def test_intersection_foreign_object(self):
class Parent(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
c = models.PositiveIntegerField()
d = models.PositiveIntegerField()
class Meta:
unique_together = (('a', 'b', 'c'),)
class Child(models.Model):
a = models.PositiveIntegerField()
b = models.PositiveIntegerField()
d = models.PositiveIntegerField()
value = models.CharField(max_length=255)
parent = ForeignObject(
Parent,
on_delete=models.SET_NULL,
from_fields=('a', 'b', 'd'),
to_fields=('a', 'b', 'd'),
related_name='children',
)
field = Child._meta.get_field('parent')
errors = field.check(from_model=Child)
expected = [
Error(
"No subset of the fields 'a', 'b', 'd' on model 'Parent' is unique.",
hint=(
"Add unique=True on any of those fields or add at least "
"a subset of them to a unique_together constraint."
),
obj=field,
id='fields.E310',
),
]
self.assertEqual(expected, errors)
| himleyb85/django | tests/invalid_models_tests/test_relative_fields.py | Python | bsd-3-clause | 58,804 |
"""
exec_command
Implements an exec_command function that is (almost) equivalent to
commands.getstatusoutput, but on NT and DOS systems the returned
status is actually correct (though the returned status values may
differ by a factor). In addition, exec_command takes keyword
arguments for (re-)defining environment variables.
Provides functions:
exec_command --- execute command in a specified directory and
in the modified environment.
find_executable --- locate a command using info from environment
variable PATH. Equivalent to posix `which`
command.
Author: Pearu Peterson <[email protected]>
Created: 11 January 2003
Requires: Python 2.x
Successfully tested on:
======== ============ =================================================
os.name sys.platform comments
======== ============ =================================================
posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
PyCrust 0.9.3, Idle 1.0.2
posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
posix darwin Darwin 7.2.0, Python 2.3
nt win32 Windows Me
Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
Python 2.1.1 Idle 0.8
nt win32 Windows 98, Python 2.1.1. Idle 0.8
nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
fail i.e. redefining environment variables may
not work. FIXED: don't use cygwin echo!
Comment: also `cmd /c echo` will not work
but redefining environment variables do work.
posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
nt win32 Windows XP, Python 2.3.3
======== ============ =================================================
Known bugs:
* Tests, that send messages to stderr, fail when executed from MSYS prompt
because the messages are lost at some point.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['exec_command', 'find_executable']
import os
import sys
import subprocess
import locale
import warnings
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
def filepath_from_subprocess_output(output):
"""
Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.
Inherited from `exec_command`, and possibly incorrect.
"""
mylocale = locale.getpreferredencoding(False)
if mylocale is None:
mylocale = 'ascii'
output = output.decode(mylocale, errors='replace')
output = output.replace('\r\n', '\n')
# Another historical oddity
if output[-1:] == '\n':
output = output[:-1]
# stdio uses bytes in python 2, so to avoid issues, we simply
# remove all non-ascii characters
if sys.version_info < (3, 0):
output = output.encode('ascii', errors='replace')
return output
def forward_bytes_to_stdout(val):
"""
Forward bytes from a subprocess call to the console, without attempting to
decode them.
The assumption is that the subprocess call already returned bytes in
a suitable encoding.
"""
if sys.version_info.major < 3:
# python 2 has binary output anyway
sys.stdout.write(val)
elif hasattr(sys.stdout, 'buffer'):
# use the underlying binary output if there is one
sys.stdout.buffer.write(val)
elif hasattr(sys.stdout, 'encoding'):
# round-trip the encoding if necessary
sys.stdout.write(val.decode(sys.stdout.encoding))
else:
# make a best-guess at the encoding
sys.stdout.write(val.decode('utf8', errors='replace'))
def temp_file_name():
# 2019-01-30, 1.17
warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
fo, name = make_temp_file()
fo.close()
return name
def get_pythonexe():
pythonexe = sys.executable
if os.name in ['nt', 'dos']:
fdir, fn = os.path.split(pythonexe)
fn = fn.upper().replace('PYTHONW', 'PYTHON')
pythonexe = os.path.join(fdir, fn)
assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
return pythonexe
def find_executable(exe, path=None, _cache={}):
"""Return full path of a executable or None.
Symbolic links are not followed.
"""
key = exe, path
try:
return _cache[key]
except KeyError:
pass
log.debug('find_executable(%r)' % exe)
orig_exe = exe
if path is None:
path = os.environ.get('PATH', os.defpath)
if os.name=='posix':
realpath = os.path.realpath
else:
realpath = lambda a:a
if exe.startswith('"'):
exe = exe[1:-1]
suffixes = ['']
if os.name in ['nt', 'dos', 'os2']:
fn, ext = os.path.splitext(exe)
extra_suffixes = ['.exe', '.com', '.bat']
if ext.lower() not in extra_suffixes:
suffixes = extra_suffixes
if os.path.isabs(exe):
paths = ['']
else:
paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
for path in paths:
fn = os.path.join(path, exe)
for s in suffixes:
f_ext = fn+s
if not os.path.islink(f_ext):
f_ext = realpath(f_ext)
if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
log.info('Found executable %s' % f_ext)
_cache[key] = f_ext
return f_ext
log.warn('Could not locate executable %s' % orig_exe)
return None
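# Illustrative usage sketch (added for clarity, not part of the original
# module). The binary names are assumptions; the results depend on the machine:
#
#   exe = find_executable('python')          # e.g. '/usr/bin/python'
#   gcc = find_executable('gcc', path='/usr/bin' + os.pathsep + '/usr/local/bin')
#   if exe is None:
#       print('no python interpreter found on PATH')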
############################################################
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
env = {name: os.environ.get(name) for name in names}
return env
def _update_environment( **env ):
log.debug('_update_environment(...)')
for name, value in env.items():
os.environ[name] = value or ''
def exec_command(command, execute_in='', use_shell=None, use_tee=None,
_with_python = 1, **env ):
"""
Return (status,output) of executed command.
.. deprecated:: 1.17
Use subprocess.Popen instead
Parameters
----------
command : str
A concatenated string of executable and arguments.
execute_in : str
Before running command ``cd execute_in`` and after ``cd -``.
use_shell : {bool, None}, optional
If True, execute ``sh -c command``. Default None (True)
use_tee : {bool, None}, optional
If True use tee. Default None (True)
Returns
-------
res : str
Both stdout and stderr messages.
Notes
-----
On NT, DOS systems the returned status is correct for external commands.
Wild cards will not work for non-posix systems or when use_shell=0.
"""
# 2019-01-30, 1.17
warnings.warn('exec_command is deprecated since NumPy v1.17, use '
'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
log.debug('exec_command(%r,%s)' % (command,\
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
use_tee = os.name=='posix'
if use_shell is None:
use_shell = os.name=='posix'
execute_in = os.path.abspath(execute_in)
oldcwd = os.path.abspath(os.getcwd())
if __name__[-12:] == 'exec_command':
exec_dir = os.path.dirname(os.path.abspath(__file__))
elif os.path.isfile('exec_command.py'):
exec_dir = os.path.abspath('.')
else:
exec_dir = os.path.abspath(sys.argv[0])
if os.path.isfile(exec_dir):
exec_dir = os.path.dirname(exec_dir)
if oldcwd!=execute_in:
os.chdir(execute_in)
log.debug('New cwd: %s' % execute_in)
else:
log.debug('Retaining cwd: %s' % oldcwd)
oldenv = _preserve_environment( list(env.keys()) )
_update_environment( **env )
try:
st = _exec_command(command,
use_shell=use_shell,
use_tee=use_tee,
**env)
finally:
if oldcwd!=execute_in:
os.chdir(oldcwd)
log.debug('Restored cwd to %s' % oldcwd)
_update_environment(**oldenv)
return st
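# Since exec_command is deprecated, here is a minimal replacement sketch using
# subprocess.Popen (the alternative suggested by the deprecation warning). The
# command is an arbitrary example and error handling is intentionally simplified:
#
#   proc = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE,
#                           stderr=subprocess.STDOUT)
#   out, _ = proc.communicate()
#   status, text = proc.returncode, out.decode('utf-8', errors='replace')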
def _exec_command(command, use_shell=None, use_tee = None, **env):
"""
Internal workhorse for exec_command().
"""
if use_shell is None:
use_shell = os.name=='posix'
if use_tee is None:
use_tee = os.name=='posix'
if os.name == 'posix' and use_shell:
# On POSIX, subprocess always uses /bin/sh, override
sh = os.environ.get('SHELL', '/bin/sh')
if is_sequence(command):
command = [sh, '-c', ' '.join(command)]
else:
command = [sh, '-c', command]
use_shell = False
elif os.name == 'nt' and is_sequence(command):
# On Windows, join the string for CreateProcess() ourselves as
# subprocess does it a bit differently
command = ' '.join(_quote_arg(arg) for arg in command)
# Inherit environment by default
env = env or None
try:
# universal_newlines is set to False so that communicate()
# will return bytes. We need to decode the output ourselves
# so that Python will not raise a UnicodeDecodeError when
# it encounters an invalid character; rather, we simply replace it
proc = subprocess.Popen(command, shell=use_shell, env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False)
except EnvironmentError:
# Return 127, as os.spawn*() and /bin/sh do
return 127, ''
text, err = proc.communicate()
mylocale = locale.getpreferredencoding(False)
if mylocale is None:
mylocale = 'ascii'
text = text.decode(mylocale, errors='replace')
text = text.replace('\r\n', '\n')
# Another historical oddity
if text[-1:] == '\n':
text = text[:-1]
# stdio uses bytes in python 2, so to avoid issues, we simply
# remove all non-ascii characters
if sys.version_info < (3, 0):
text = text.encode('ascii', errors='replace')
if use_tee and text:
print(text)
return proc.returncode, text
def _quote_arg(arg):
"""
Quote the argument for safe use in a shell command line.
"""
    # If there is a quote in the string, assume relevant parts of the
# string are already quoted (e.g. '-I"C:\\Program Files\\..."')
if '"' not in arg and ' ' in arg:
return '"%s"' % arg
return arg
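# For example, _quote_arg('C:\\Program Files\\foo') returns
# '"C:\\Program Files\\foo"', while arguments that already contain a double
# quote (or contain no spaces) are passed through unchanged.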
############################################################
| shoyer/numpy | numpy/distutils/exec_command.py | Python | bsd-3-clause | 10,919 |
# -*- coding: utf-8 -*-
"""
wakatime.dependencies.haxe
~~~~~~~~~~~~~~~~~~~~~~~~~~
Parse dependencies from Haxe code.
:copyright: (c) 2018 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from . import TokenParser
class HaxeParser(TokenParser):
exclude = [
r'^haxe$',
]
state = None
def parse(self):
for index, token, content in self.tokens:
self._process_token(token, content)
return self.dependencies
def _process_token(self, token, content):
if self.partial(token) == 'Namespace':
self._process_namespace(token, content)
elif self.partial(token) == 'Text':
self._process_text(token, content)
else:
self._process_other(token, content)
def _process_namespace(self, token, content):
if self.state == 'import':
self.append(self._format(content))
self.state = None
else:
self.state = content
def _process_text(self, token, content):
pass
def _process_other(self, token, content):
self.state = None
def _format(self, content):
return content.strip()
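    # Illustrative walk-through of the state machine above (hypothetical token
    # stream, added for clarity): for a line such as ``import haxe.ui.Button;``
    # the lexer first yields a Namespace token 'import', which only records the
    # state, and then a Namespace token 'haxe.ui.Button', which is appended as
    # a dependency. Text tokens are ignored; any other token resets the state.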
| wakatime/wakatime | wakatime/dependencies/haxe.py | Python | bsd-3-clause | 1,199 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import absolute_import
import contextlib
import logging
import os
import py_utils
from py_utils import binary_manager
from py_utils import cloud_storage
from py_utils import dependency_util
import dependency_manager
from dependency_manager import base_config
from devil import devil_env
from telemetry.core import exceptions
from telemetry.core import util
TELEMETRY_PROJECT_CONFIG = os.path.join(
util.GetTelemetryDir(), 'telemetry', 'binary_dependencies.json')
CHROME_BINARY_CONFIG = os.path.join(util.GetCatapultDir(), 'common', 'py_utils',
'py_utils', 'chrome_binaries.json')
SUPPORTED_DEP_PLATFORMS = (
'linux_aarch64', 'linux_x86_64', 'linux_armv7l', 'linux_mips',
'mac_x86_64', 'mac_arm64',
'win_x86', 'win_AMD64',
'android_arm64-v8a', 'android_armeabi-v7a', 'android_arm', 'android_x64',
'android_x86'
)
PLATFORMS_TO_DOWNLOAD_FOLDER_MAP = {
'linux_aarch64': 'bin/linux/aarch64',
'linux_x86_64': 'bin/linux/x86_64',
'linux_armv7l': 'bin/linux/armv7l',
'linux_mips': 'bin/linux/mips',
'mac_x86_64': 'bin/mac/x86_64',
'mac_arm64': 'bin/mac/arm64',
'win_x86': 'bin/win/x86',
'win_AMD64': 'bin/win/AMD64',
'android_arm64-v8a': 'bin/android/arm64-v8a',
'android_armeabi-v7a': 'bin/android/armeabi-v7a',
'android_arm': 'bin/android/arm',
'android_x64': 'bin/android/x64',
'android_x86': 'bin/android/x86',
}
NoPathFoundError = dependency_manager.NoPathFoundError
CloudStorageError = dependency_manager.CloudStorageError
_binary_manager = None
_installed_helpers = set()
TELEMETRY_BINARY_BASE_CS_FOLDER = 'binary_dependencies'
TELEMETRY_BINARY_CS_BUCKET = cloud_storage.PUBLIC_BUCKET
def NeedsInit():
return not _binary_manager
def InitDependencyManager(client_configs):
if GetBinaryManager():
raise exceptions.InitializationError(
'Trying to re-initialize the binary manager with config %s'
% client_configs)
configs = []
if client_configs:
configs += client_configs
configs += [TELEMETRY_PROJECT_CONFIG, CHROME_BINARY_CONFIG]
SetBinaryManager(binary_manager.BinaryManager(configs))
devil_env.config.Initialize()
@contextlib.contextmanager
def TemporarilyReplaceBinaryManager(manager):
old_manager = GetBinaryManager()
try:
SetBinaryManager(manager)
yield
finally:
SetBinaryManager(old_manager)
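# Minimal usage sketch (added for clarity; the replacement manager below is a
# hypothetical stand-in, e.g. a stub used by tests):
#
#   stub = binary_manager.BinaryManager([TELEMETRY_PROJECT_CONFIG])
#   with TemporarilyReplaceBinaryManager(stub):
#     pass  # code in this block sees the stub; the old manager is restored on exit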
def GetBinaryManager():
return _binary_manager
def SetBinaryManager(manager):
global _binary_manager # pylint: disable=global-statement
_binary_manager = manager
def _IsChromeOSLocalMode(os_name):
"""Determines if we're running telemetry on a Chrome OS device.
Used to differentiate local mode (telemetry running on the CrOS DUT) from
remote mode (running telemetry on another platform that communicates with
the CrOS DUT over SSH).
"""
return os_name == 'chromeos' and py_utils.GetHostOsName() == 'chromeos'
def FetchPath(binary_name, os_name, arch, os_version=None):
""" Return a path to the appropriate executable for <binary_name>, downloading
from cloud storage if needed, or None if it cannot be found.
"""
if GetBinaryManager() is None:
raise exceptions.InitializationError(
'Called FetchPath with uninitialized binary manager.')
return GetBinaryManager().FetchPath(
binary_name, 'linux' if _IsChromeOSLocalMode(os_name) else os_name,
arch, os_version)
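# Hedged usage sketch (added for clarity): the binary name and platform values
# below are examples only and must exist in one of the configured dependency
# configs for the call to succeed:
#
#   InitDependencyManager(None)
#   path = FetchPath('minidump_stackwalk', 'linux', 'x86_64')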
def LocalPath(binary_name, os_name, arch, os_version=None):
""" Return a local path to the given binary name, or None if an executable
cannot be found. Will not download the executable.
"""
if GetBinaryManager() is None:
raise exceptions.InitializationError(
'Called LocalPath with uninitialized binary manager.')
return GetBinaryManager().LocalPath(binary_name, os_name, arch, os_version)
def FetchBinaryDependencies(
platform, client_configs, fetch_reference_chrome_binary):
""" Fetch all binary dependenencies for the given |platform|.
Note: we don't fetch browser binaries by default because the size of the
  binary is about 2 GB, and it requires cloud storage permission to the
  chrome-telemetry bucket.
Args:
platform: an instance of telemetry.core.platform
client_configs: A list of paths (string) to dependencies json files.
fetch_reference_chrome_binary: whether to fetch reference chrome binary for
the given platform.
"""
configs = [
dependency_manager.BaseConfig(TELEMETRY_PROJECT_CONFIG),
]
dep_manager = dependency_manager.DependencyManager(configs)
os_name = platform.GetOSName()
# If we're running directly on a Chrome OS device, fetch the binaries for
# linux instead, which should be compatible with CrOS. Otherwise, if we're
# running remotely on CrOS, fetch the binaries for the host platform like
# we do with android below.
if _IsChromeOSLocalMode(os_name):
os_name = 'linux'
target_platform = '%s_%s' % (os_name, platform.GetArchName())
dep_manager.PrefetchPaths(target_platform)
host_platform = None
fetch_devil_deps = False
if os_name in ('android', 'chromeos'):
host_platform = '%s_%s' % (
py_utils.GetHostOsName(), py_utils.GetHostArchName())
dep_manager.PrefetchPaths(host_platform)
if os_name == 'android':
if host_platform == 'linux_x86_64':
fetch_devil_deps = True
else:
logging.error('Devil only supports 64 bit linux as a host platform. '
'Android tests may fail.')
if fetch_reference_chrome_binary:
_FetchReferenceBrowserBinary(platform)
# For now, handle client config separately because the BUILD.gn & .isolate of
# telemetry tests in chromium src failed to include the files specified in its
# client config.
# (https://github.com/catapult-project/catapult/issues/2192)
# For now this is ok because the client configs usually don't include cloud
# storage infos.
# TODO(crbug.com/1111556): remove the logic of swallowing exception once the
# issue is fixed on Chromium side.
if client_configs:
manager = dependency_manager.DependencyManager(
list(dependency_manager.BaseConfig(c) for c in client_configs))
try:
manager.PrefetchPaths(target_platform)
if host_platform is not None:
manager.PrefetchPaths(host_platform)
except dependency_manager.NoPathFoundError as e:
logging.error('Error when trying to prefetch paths for %s: %s',
target_platform, e)
if fetch_devil_deps:
devil_env.config.Initialize()
devil_env.config.PrefetchPaths(arch=platform.GetArchName())
devil_env.config.PrefetchPaths()
def ReinstallAndroidHelperIfNeeded(binary_name, install_path, device):
""" Install a binary helper to a specific location.
Args:
binary_name: (str) The name of the binary from binary_dependencies.json
install_path: (str) The path to install the binary at
device: (device_utils.DeviceUtils) a device to install the helper to
Raises:
Exception: When the binary could not be fetched or could not be pushed to
the device.
"""
if (device.serial, install_path) in _installed_helpers:
return
host_path = FetchPath(binary_name, 'android', device.GetABI())
if not host_path:
    raise Exception(
        '%s binary could not be fetched as %s' % (binary_name, host_path))
device.PushChangedFiles([(host_path, install_path)])
device.RunShellCommand(['chmod', '777', install_path], check_return=True)
_installed_helpers.add((device.serial, install_path))
def _FetchReferenceBrowserBinary(platform):
os_name = platform.GetOSName()
if _IsChromeOSLocalMode(os_name):
os_name = 'linux'
arch_name = platform.GetArchName()
manager = binary_manager.BinaryManager(
[CHROME_BINARY_CONFIG])
if os_name == 'android':
os_version = dependency_util.GetChromeApkOsVersion(
platform.GetOSVersionName())
manager.FetchPath(
'chrome_stable', os_name, arch_name, os_version)
else:
manager.FetchPath(
'chrome_stable', os_name, arch_name)
def UpdateDependency(dependency, dep_local_path, version,
os_name=None, arch_name=None):
config = os.path.join(
util.GetTelemetryDir(), 'telemetry', 'binary_dependencies.json')
if not os_name:
assert not arch_name, 'arch_name is specified but not os_name'
os_name = py_utils.GetHostOsName()
arch_name = py_utils.GetHostArchName()
else:
assert arch_name, 'os_name is specified but not arch_name'
dep_platform = '%s_%s' % (os_name, arch_name)
c = base_config.BaseConfig(config, writable=True)
try:
old_version = c.GetVersion(dependency, dep_platform)
print('Updating from version: {}'.format(old_version))
except ValueError:
raise RuntimeError(
('binary_dependencies.json entry for %s missing or invalid; please add '
'it first! (need download_path and path_within_archive)') %
dep_platform)
if dep_local_path:
c.AddCloudStorageDependencyUpdateJob(
dependency, dep_platform, dep_local_path, version=version,
execute_job=True)
| catapult-project/catapult | telemetry/telemetry/internal/util/binary_manager.py | Python | bsd-3-clause | 9,298 |
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import decimal
import re
import sys
import warnings
def _setup_environment(environ):
import platform
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
import os
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
try:
import pytz
except ImportError:
pytz = None
from django.db import utils
from django.db.backends import *
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import force_bytes, force_text
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will
# also be True for cx_Oracle 5.1 and in Python 3.0. See #19606
if int(Database.version.split('.', 1)[0]) >= 5 and \
(int(Database.version.split('.', 2)[1]) >= 1 or
not hasattr(Database, 'UNICODE')):
convert_unicode = force_text
else:
convert_unicode = force_bytes
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
needs_datetime_string_cast = False
interprets_empty_strings_as_nulls = True
uses_savepoints = True
has_select_for_update = True
has_select_for_update_nowait = True
can_return_id_from_insert = True
allow_sliced_subqueries = False
supports_subqueries_in_group_by = False
supports_transactions = True
supports_timezones = False
has_zoneinfo_database = pytz is not None
supports_bitwise_or = False
can_defer_constraint_checks = True
ignores_nulls_in_unique_constraints = False
has_bulk_insert = True
supports_tablespaces = True
supports_sequence_reset = False
atomic_transactions = False
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Oracle, we have to
# create a sequence and a trigger.
sq_name = self._get_sequence_name(table)
tr_name = self._get_trigger_name(table)
tbl_name = self.quote_name(table)
col_name = self.quote_name(column)
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 0 THEN
EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % locals()
trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
BEGIN
SELECT "%(sq_name)s".nextval
INTO :new.%(col_name)s FROM dual;
END;
/""" % locals()
return sequence_sql, trigger_sql
def cache_key_culling_sql(self):
return """
SELECT cache_key
FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
WHERE rank = %%s + 1
"""
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the interval functionality for expressions
format for Oracle:
(datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
"""
minutes, seconds = divmod(timedelta.seconds, 60)
hours, minutes = divmod(minutes, 60)
days = str(timedelta.days)
day_precision = len(days)
fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
return fmt % (sql, connector, days, hours, minutes, seconds,
timedelta.microseconds, day_precision)
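    # Worked example (added for clarity): with connector '+' and
    # datetime.timedelta(days=3, minutes=3, seconds=20) this produces
    # "(datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))",
    # matching the format shown in the docstring above.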
def date_trunc_sql(self, lookup_type, field_name):
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
def _convert_field_to_tz(self, field_name, tzname):
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE.
result = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname)
# Extracting from a TIMESTAMP WITH TIME ZONE ignore the time zone.
# Convert to a DATETIME, which is called DATE by Oracle. There's no
# built-in function to do that; the easiest is to go through a string.
result = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
result = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
# Re-convert to a TIMESTAMP because EXTRACT only handles the date part
# on DATE values, even though they actually store the time part.
return "CAST(%s AS TIMESTAMP)" % result
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
sql = "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
return sql, []
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = field_name # Cast to DATE removes sub-second precision.
return sql, []
def convert_values(self, value, field):
if isinstance(value, Database.LOB):
value = value.read()
if field and field.get_internal_type() == 'TextField':
value = force_text(value)
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
if value is None and field and field.empty_strings_allowed:
value = ''
# Convert 1 or 0 to True or False
elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
value = bool(value)
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
# Convert floats to decimals
elif value is not None and field and field.get_internal_type() == 'DecimalField':
value = util.typecast_decimal(field.format_number(value))
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime. We use the type
# of the Field to determine which to cast to, but it's not
# always available.
# As a workaround, we cast to date if all the time-related
# values are 0, or to time if the date is 1/1/1900.
# This could be cleaned a bit by adding a method to the Field
# classes to normalize values from the database (the to_python
# method is used for validation and isn't what we want here).
elif isinstance(value, Database.Timestamp):
if field and field.get_internal_type() == 'DateTimeField':
pass
elif field and field.get_internal_type() == 'DateField':
value = value.date()
elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
value = value.time()
elif value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
return value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
def fetch_returned_insert_id(self, cursor):
return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def last_executed_query(self, cursor, sql, params):
# http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
if statement and six.PY2 and not isinstance(statement, unicode):
statement = statement.decode('utf-8')
        # Unlike Psycopg's `query` and MySQLdb's `_last_executed`, CxOracle's
# `statement` doesn't contain the query parameters. refs #20010.
return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return force_text(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % util.truncate_name(name.upper(),
self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%','%%')
return name.upper()
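    # For example (illustrative): quote_name('django_content_type') returns
    # '"DJANGO_CONTENT_TYPE"', while an already-quoted name such as '"MyTable"'
    # is only uppercased, not re-quoted or truncated.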
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def regex_lookup(self, lookup_type):
# If regex_lookup is called before it's been initialized, then create
# a cursor to initialize it and recur.
self.connection.cursor()
return self.connection.ops.regex_lookup(lookup_type)
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
sequence_name = self._get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {'sequence': sequence_name,
'table': table_name,
'column': column_name}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = _get_sequence_reset_sql()
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
table_name = self.quote_name(model._meta.db_table)
sequence_name = self._get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.column)
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.rel.through:
table_name = self.quote_name(f.m2m_db_table())
sequence_name = self._get_sequence_name(f.m2m_db_table())
column_name = self.quote_name('id')
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def value_to_db_datetime(self, value):
if value is None:
return None
# Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
return six.text_type(value)
def value_to_db_time(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
return datetime.datetime.strptime(value, '%H:%M:%S')
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return datetime.datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def year_lookup_bounds_for_date_field(self, value):
first = '%s-01-01'
second = '%s-12-31'
return [first % value, second % value]
def year_lookup_bounds_for_datetime_field(self, value):
# The default implementation uses datetime objects for the bounds.
# This must be overridden here, to use a formatted date (string) as
# 'second' instead -- cx_Oracle chops the fraction-of-second part
# off of datetime objects, leaving almost an entire second out of
# the year under the default implementation.
bounds = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
if settings.USE_TZ:
bounds = [b.astimezone(timezone.utc).replace(tzinfo=None) for b in bounds]
return [b.isoformat(b' ') for b in bounds]
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
raise NotImplementedError("Bit-wise or is not supported in Oracle.")
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
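    # For example (illustrative): combine_expression('%%', ['"A"', '2']) yields
    # 'MOD("A",2)', combine_expression('&', ['"A"', '"B"']) yields
    # 'BITAND("A","B")', and '|' raises NotImplementedError.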
def _get_sequence_name(self, table):
name_length = self.max_name_length() - 3
return '%s_SQ' % util.truncate_name(table, name_length).upper()
def _get_trigger_name(self, table):
name_length = self.max_name_length() - 3
return '%s_TR' % util.truncate_name(table, name_length).upper()
def bulk_insert_sql(self, fields, num_values):
items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields))
return " UNION ALL ".join([items_sql] * num_values)
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
Database = Database
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT'].strip():
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
conn_string = convert_unicode(self._connect_string())
return Database.connect(conn_string, **conn_params)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in a single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set oracle date to ansi date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except DatabaseError:
self.operators = self._likec_operators
else:
self.operators = self._standard_operators
cursor.close()
# There's no way for the DatabaseOperations class to know the
# currently active Oracle version, so we do some setups here.
# TODO: Multi-db support will need a better solution (a way to
# communicate the current version).
if self.oracle_version is not None and self.oracle_version <= 9:
self.ops.regex_lookup = self.ops.regex_lookup_9
else:
self.ops.regex_lookup = self.ops.regex_lookup_10
try:
self.connection.stmtcachesize = 20
except:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
def create_cursor(self):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
# Oracle doesn't support savepoint commits. Ignore them.
def _savepoint_commit(self, sid):
pass
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're
        done, we must ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
if hasattr(self.connection, 'ping'): # Oracle 10g R2 and higher
self.connection.ping()
else:
# Use a cx_Oracle cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1 FROM DUAL")
except Database.Error:
return False
else:
return True
@cached_property
def oracle_version(self):
with self.temporary_connection():
version = self.connection.version
try:
return int(version.split('.')[0])
except ValueError:
return None
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and isinstance(param, datetime.datetime):
if timezone.is_naive(param):
warnings.warn("Oracle received a naive datetime (%s)"
" while time zone support is active." % param,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
param = timezone.make_aware(param, default_timezone)
param = param.astimezone(timezone.utc).replace(tzinfo=None)
# Oracle doesn't recognize True and False correctly in Python 3.
# The conversion done below works both in 2 and 3.
if param is True:
param = "1"
elif param is False:
param = "0"
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, six.memoryview):
self.force_bytes = param
else:
self.force_bytes = convert_unicode(param, cursor.charset,
strings_only)
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif isinstance(param, six.string_types) and len(param) > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
    from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class InsertIdVar(object):
"""
A late-binding cursor variable that can be passed to Cursor.execute
as a parameter, in order to receive the id of the row created by an
insert statement.
"""
def bind_parameter(self, cursor):
param = cursor.cursor.var(Database.NUMBER)
cursor._insert_id_var = param
return param
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return dict((k,OracleParam(v, self, True)) for k,v in params.items())
except AttributeError:
return tuple([OracleParam(p, self, True) for p in params])
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return dict((k, v.force_bytes) for k,v in params.items())
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = convert_unicode(query, self.charset)
elif hasattr(params, 'keys'):
# Handle params as dict
args = dict((k, ":%s"%k) for k in params.keys())
query = convert_unicode(query % args, self.charset)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = convert_unicode(query % tuple(args), self.charset)
return query, self._format_params(params)
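    # Illustrative example of the rewriting above: the query
    # 'SELECT * FROM "T" WHERE "A" = %s AND "B" = %s' with params [1, 'x']
    # becomes 'SELECT * FROM "T" WHERE "A" = :arg0 AND "B" = :arg1', and each
    # param is wrapped in an OracleParam.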
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams]+[self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple([_rowfactory(r, self.cursor)
for r in self.cursor.fetchmany(size)])
def fetchall(self):
return tuple([_rowfactory(r, self.cursor)
for r in self.cursor.fetchall()])
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
"""Cursor iterator wrapper that invokes our custom row factory."""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
# datetimes are returned as TIMESTAMP, except the results
# of "dates" queries, which are returned as DATETIME.
elif desc[1] in (Database.TIMESTAMP, Database.DATETIME):
# Confirm that dt is naive before overwriting its tzinfo.
if settings.USE_TZ and value is not None and timezone.is_naive(value):
value = value.replace(tzinfo=timezone.utc)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, six.string_types):
return force_text(s)
return s
def _get_sequence_reset_sql():
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
return """
DECLARE
table_value integer;
seq_value integer;
BEGIN
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = '%(sequence)s';
WHILE table_value > seq_value LOOP
SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
END LOOP;
END;
/"""
| atruberg/django-custom | django/db/backends/oracle/base.py | Python | bsd-3-clause | 39,830 |
# -*- coding: utf-8 -*-
# pylint: disable=E265
"""
lantz.drivers.andor.ccd
~~~~~~~~~~~~~~~~~~~~~~~
Low level driver wrapping library for CCD and Intensified CCD cameras.
Only functions for iXon EMCCD cameras were tested.
Only tested in Windows OS.
The driver was written for the single-camera scenario. If more than one
    camera is present, some 'read_once=True' flags should be removed, but it
    shouldn't be necessary to make any other changes.
Sources::
- Andor SDK 2.96 Manual
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import numpy as np
import ctypes as ct
from collections import namedtuple
from lantz import Driver, Feat, Action, DictFeat
from lantz.errors import InstrumentError
from lantz.foreign import LibraryDriver
from lantz import Q_
degC = Q_(1, 'degC')
us = Q_(1, 'us')
MHz = Q_(1, 'MHz')
seg = Q_(1, 's')
_ERRORS = {
20002: 'DRV_SUCCESS',
20003: 'DRV_VXDNOTINSTALLED',
20004: 'DRV_ERROR_SCAN',
20005: 'DRV_ERROR_CHECK_SUM',
20006: 'DRV_ERROR_FILELOAD',
20007: 'DRV_UNKNOWN_FUNCTION',
20008: 'DRV_ERROR_VXD_INIT',
20009: 'DRV_ERROR_ADDRESS',
20010: 'DRV_ERROR_PAGELOCK',
20011: 'DRV_ERROR_PAGE_UNLOCK',
20012: 'DRV_ERROR_BOARDTEST',
20013: 'Unable to communicate with card.',
20014: 'DRV_ERROR_UP_FIFO',
20015: 'DRV_ERROR_PATTERN',
20017: 'DRV_ACQUISITION_ERRORS',
20018: 'Computer unable to read the data via the ISA slot at the required rate.',
20019: 'DRV_ACQ_DOWNFIFO_FULL',
    20020: 'DRV_PROC_UNKNOWN_INSTRUCTION',
20021: 'DRV_ILLEGAL_OP_CODE',
20022: 'Unable to meet Kinetic cycle time.',
20023: 'Unable to meet Accumulate cycle time.',
20024: 'No acquisition has taken place',
20026: 'Overflow of the spool buffer.',
20027: 'DRV_SPOOLSETUPERROR',
20033: 'DRV_TEMPERATURE_CODES',
20034: 'Temperature is OFF.',
20035: 'Temperature reached but not stabilized.',
20036: 'Temperature has stabilized at set point.',
20037: 'Temperature has not reached set point.',
20038: 'DRV_TEMPERATURE_OUT_RANGE',
20039: 'DRV_TEMPERATURE_NOT_SUPPORTED',
20040: 'Temperature had stabilized but has since drifted.',
20049: 'DRV_GENERAL_ERRORS',
20050: 'DRV_INVALID_AUX',
20051: 'DRV_COF_NOTLOADED',
20052: 'DRV_FPGAPROG',
20053: 'DRV_FLEXERROR',
20054: 'DRV_GPIBERROR',
20064: 'DRV_DATATYPE',
20065: 'DRV_DRIVER_ERRORS',
20066: 'Invalid parameter 1',
20067: 'Invalid parameter 2',
20068: 'Invalid parameter 3',
20069: 'Invalid parameter 4',
20070: 'DRV_INIERROR',
20071: 'DRV_COFERROR',
20072: 'Acquisition in progress',
20073: 'The system is not currently acquiring',
20074: 'DRV_TEMPCYCLE',
20075: 'System not initialized',
20076: 'DRV_P5INVALID',
20077: 'DRV_P6INVALID',
20078: 'Not a valid mode',
20079: 'DRV_INVALID_FILTER',
20080: 'DRV_I2CERRORS',
20081: 'DRV_DRV_I2CDEVNOTFOUND',
20082: 'DRV_I2CTIMEOUT',
20083: 'DRV_P7INVALID',
20089: 'DRV_USBERROR',
20090: 'DRV_IOCERROR',
20091: 'DRV_VRMVERSIONERROR',
20093: 'DRV_USB_INTERRUPT_ENDPOINT_ERROR',
20094: 'DRV_RANDOM_TRACK_ERROR',
20095: 'DRV_INVALID_TRIGGER_MODE',
20096: 'DRV_LOAD_FIRMWARE_ERROR',
20097: 'DRV_DIVIDE_BY_ZERO_ERROR',
20098: 'DRV_INVALID_RINGEXPOSURES',
20099: 'DRV_BINNING_ERROR',
20990: 'No camera present',
20991: 'Feature not supported on this camera.',
20992: 'Feature is not available at the moment.',
20115: 'DRV_ERROR_MAP',
20116: 'DRV_ERROR_UNMAP',
20117: 'DRV_ERROR_MDL',
20118: 'DRV_ERROR_UNMDL',
20119: 'DRV_ERROR_BUFFSIZE',
20121: 'DRV_ERROR_NOHANDLE',
20130: 'DRV_GATING_NOT_AVAILABLE',
20131: 'DRV_FPGA_VOLTAGE_ERROR',
20100: 'DRV_INVALID_AMPLIFIER',
20101: 'DRV_INVALID_COUNTCONVERT_MODE'
}
class CCD(LibraryDriver):
LIBRARY_NAME = 'atmcd64d.dll'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cameraIndex = ct.c_int(0)
def _patch_functions(self):
internal = self.lib.internal
        # ctypes argtypes must be sequences of ctypes *types*; pointer
        # arguments are declared with ct.POINTER(...). The getter below is
        # assumed to return its value through a float pointer.
        internal.GetCameraSerialNumber.argtypes = [ct.POINTER(ct.c_uint)]
        internal.Filter_SetAveragingFactor.argtypes = [ct.c_int]
        internal.Filter_SetThreshold.argtypes = [ct.c_float]
        internal.Filter_GetThreshold.argtypes = [ct.POINTER(ct.c_float)]
def _return_handler(self, func_name, ret_value):
excl_func = ['GetTemperatureF', 'IsCountConvertModeAvailable',
'IsAmplifierAvailable', 'IsTriggerModeAvailable']
if ret_value != 20002 and func_name not in excl_func:
raise InstrumentError('{}'.format(_ERRORS[ret_value]))
return ret_value
def initialize(self):
""" This function will initialize the Andor SDK System. As part of the
initialization procedure on some cameras (i.e. Classic, iStar and
        earlier iXon) the DLL will need access to a DETECTOR.INI which
        contains information relating to the detector head, number of pixels,
readout speeds etc. If your system has multiple cameras then see the
section Controlling multiple cameras.
"""
self.lib.Initialize()
self.triggers = {'Internal': 0, 'External': 1, 'External Start': 6,
'External Exposure': 7, 'External FVB EM': 9,
'Software Trigger': 10,
'External Charge Shifting': 12}
self.savetypes = {'Signed16bits': 1, 'Signed32bits': 2, 'Float': 3}
# Initial values
self.readout_packing_state = False
self.readout_packing = self.readout_packing_state
self.readout_mode_mode = 'Image'
self.readout_mode = self.readout_mode_mode
self.photon_counting_mode_state = False
self.photon_counting_mode = self.photon_counting_mode_state
self.frame_transfer_mode_state = False
self.frame_transfer_mode = self.frame_transfer_mode_state
self.fan_mode_index = 'onfull'
self.fan_mode = self.fan_mode_index
self.EM_gain_mode_index = 'RealGain'
self.EM_gain_mode = self.EM_gain_mode_index
self.cooled_on_shutdown_value = False
self.cooled_on_shutdown = self.cooled_on_shutdown_value
self.baseline_offset_value = 100
self.baseline_offset = self.baseline_offset_value
self.adv_trigger_mode_state = True
self.adv_trigger_mode = self.adv_trigger_mode_state
self.acq_mode = 'Single Scan'
self.acquisition_mode = self.acq_mode
self.amp_typ = 0
self.horiz_shift_speed_index = 0
self.horiz_shift_speed = self.horiz_shift_speed_index
self.vert_shift_speed_index = 0
self.vert_shift_speed = self.vert_shift_speed_index
self.preamp_index = 0
self.preamp = self.preamp_index
self.temperature_sp = 0 * degC
self.temperature_setpoint = self.temperature_sp
self.auxout = np.zeros(4, dtype=bool)
for i in np.arange(1, 5):
self.out_aux_port[i] = False
self.trigger_mode_index = 'Internal'
self.trigger_mode = self.trigger_mode_index
def finalize(self):
"""Finalize Library. Concluding function.
"""
if self.status != 'Camera is idle, waiting for instructions.':
self.abort_acquisition()
self.cooler_on = False
self.free_int_mem()
self.lib.ShutDown()
### SYSTEM INFORMATION
@Feat(read_once=True)
def ncameras(self):
"""This function returns the total number of Andor cameras currently
installed. It is possible to call this function before any of the
cameras are initialized.
"""
n = ct.c_long()
self.lib.GetAvailableCameras(ct.pointer(n))
return n.value
def camera_handle(self, index):
"""This function returns the handle for the camera specified by
cameraIndex. When multiple Andor cameras are installed the handle of
each camera must be retrieved in order to select a camera using the
SetCurrentCamera function.
The number of cameras can be obtained using the GetAvailableCameras
function.
:param index: index of any of the installed cameras.
Valid values: 0 to NumberCameras-1 where NumberCameras
is the value returned by the GetAvailableCameras function.
"""
index = ct.c_long(index)
handle = ct.c_long()
self.lib.GetCameraHandle(index, ct.pointer(handle))
return handle.value
@Feat()
def current_camera(self):
"""When multiple Andor cameras are installed this function allows the
user to select which camera is currently active. Once a camera has been
selected the other functions can be called as normal but they will only
apply to the selected camera. If only 1 camera is installed calling
this function is not required since that camera will be selected by
default.
"""
n = ct.c_long() # current camera handler
self.lib.GetCurrentCamera(ct.pointer(n))
return n.value
@current_camera.setter
def current_camera(self, value):
value = ct.c_long(value)
self.lib.SetCurrentCamera(value.value) # needs camera handler
@Feat(read_once=True)
def idn(self):
"""Identification of the device
"""
hname = (ct.c_char * 100)()
self.lib.GetHeadModel(ct.pointer(hname))
hname = str(hname.value)[2:-1]
sn = ct.c_uint()
self.lib.GetCameraSerialNumber(ct.pointer(sn))
return 'Andor ' + hname + ', serial number ' + str(sn.value)
@Feat(read_once=True)
def hardware_version(self):
pcb, decode = ct.c_uint(), ct.c_uint()
dummy1, dummy2 = ct.c_uint(), ct.c_uint()
firmware_ver, firmware_build = ct.c_uint(), ct.c_uint()
self.lib.GetHardwareVersion(ct.pointer(pcb), ct.pointer(decode),
ct.pointer(dummy1), ct.pointer(dummy2),
ct.pointer(firmware_ver),
ct.pointer(firmware_build))
results = namedtuple('hardware_versions',
'PCB Flex10K CameraFirmware CameraFirmwareBuild')
return results(pcb.value, decode.value, firmware_ver.value,
firmware_build.value)
@Feat(read_once=True)
def software_version(self):
eprom, coffile, vxdrev = ct.c_uint(), ct.c_uint(), ct.c_uint()
vxdver, dllrev, dllver = ct.c_uint(), ct.c_uint(), ct.c_uint()
self.lib.GetSoftwareVersion(ct.pointer(eprom), ct.pointer(coffile),
ct.pointer(vxdrev), ct.pointer(vxdver),
ct.pointer(dllrev), ct.pointer(dllver))
results = namedtuple('software_versions',
'EPROM COF DriverRev DriverVer DLLRev DLLVer')
return results(eprom.value, coffile.value, vxdrev.value,
vxdver.value, dllrev.value, dllver.value)
# TODO: Make sense of this:
@Feat(read_once=True)
def capabilities(self):
"""This function will fill in an AndorCapabilities structure with the
capabilities associated with the connected camera. Individual
capabilities are determined by examining certain bits and combinations
        of bits in the member variables of the AndorCapabilities structure.
"""
class Capabilities(ct.Structure):
_fields_ = [("Size", ct.c_ulong),
("AcqModes", ct.c_ulong),
("ReadModes", ct.c_ulong),
("FTReadModes", ct.c_ulong),
("TriggerModes", ct.c_ulong),
("CameraType", ct.c_ulong),
("PixelModes", ct.c_ulong),
("SetFunctions", ct.c_ulong),
("GetFunctions", ct.c_ulong),
("Features", ct.c_ulong),
("PCICard", ct.c_ulong),
("EMGainCapability", ct.c_ulong)]
stru = Capabilities()
stru.Size = ct.sizeof(stru)
self.lib.GetCapabilities(ct.pointer(stru))
return stru
@Feat(read_once=True)
def controller_card(self):
"""This function will retrieve the type of PCI controller card included
in your system. This function is not applicable for USB systems. The
maximum number of characters that can be returned from this function is
10.
"""
        model = (ct.c_char * 10)()
        self.lib.GetControllerCardModel(ct.pointer(model))
        return str(model.value)[2:-1]
@Feat(read_once=True)
def count_convert_wavelength_range(self):
"""This function returns the valid wavelength range available in Count
Convert mode."""
mini = ct.c_float()
maxi = ct.c_float()
self.lib.GetCountConvertWavelengthRange(ct.pointer(mini),
ct.pointer(maxi))
return (mini.value, maxi.value)
@Feat(read_once=True)
def detector_shape(self):
xp, yp = ct.c_int(), ct.c_int()
self.lib.GetDetector(ct.pointer(xp), ct.pointer(yp))
return (xp.value, yp.value)
@Feat(read_once=True)
def px_size(self):
"""This function returns the dimension of the pixels in the detector
in microns.
"""
xp, yp = ct.c_float(), ct.c_float()
self.lib.GetPixelSize(ct.pointer(xp), ct.pointer(yp))
return (xp.value, yp.value)
def QE(self, wl):
"""Returns the percentage QE for a particular head model at a user
specified wavelength.
"""
hname = (ct.c_char * 100)()
self.lib.GetHeadModel(ct.pointer(hname))
wl = ct.c_float(wl)
qe = ct.c_float()
self.lib.GetQE(ct.pointer(hname), wl, ct.c_uint(0), ct.pointer(qe))
return qe.value
def sensitivity(self, ad, amp, i, pa):
"""This function returns the sensitivity for a particular speed.
"""
sens = ct.c_float()
ad, amp, i, pa = ct.c_int(ad), ct.c_int(amp), ct.c_int(i), ct.c_int(pa)
self.lib.GetSensitivity(ad, amp, i, pa, ct.pointer(sens))
return sens.value
def count_convert_available(self, mode):
"""This function checks if the hardware and current settings permit
the use of the specified Count Convert mode.
"""
mode = ct.c_int(mode)
ans = self.lib.IsCountConvertModeAvailable(mode)
if ans == 20002:
return True
else:
return False
### SHUTTER # I couldn't find a better way to do this... sorry
@Action()
def shutter(self, typ, mode, ext_closing, ext_opening, ext_mode):
"""This function expands the control offered by SetShutter to allow an
external shutter and internal shutter to be controlled independently
(only available on some cameras – please consult your Camera User
Guide). The typ parameter allows the user to control the TTL signal
output to an external shutter. The opening and closing times specify
the length of time required to open and close the shutter (this
information is required for calculating acquisition timings – see
SHUTTER TRANSFER TIME).
The mode and extmode parameters control the behaviour of the internal
and external shutters. To have an external shutter open and close
automatically in an experiment, set the mode parameter to “Open” and
set the extmode parameter to “Auto”. To have an internal shutter open
and close automatically in an experiment, set the extmode parameter to
“Open” and set the mode parameter to “Auto”.
To not use any shutter in the experiment, set both shutter modes to
permanently open.
:param typ: 0 (or 1) Output TTL low (or high) signal to open shutter.
:param mode: Internal shutter: 0 Fully Auto, 1 Permanently Open,
2 Permanently Closed, 4 Open for FVB series, 5 Open for any series.
:param ext_closing: Time shutter takes to close (milliseconds)
:param ext_opening: Time shutter takes to open (milliseconds)
:param ext_mode: External shutter: 0 Fully Auto, 1 Permanently Open,
2 Permanently Closed, 4 Open for FVB series, 5 Open for any series.
"""
self.lib.SetShutterEx(ct.c_int(typ), ct.c_int(mode),
ct.c_int(ext_closing), ct.c_int(ext_opening),
ct.c_int(ext_mode))
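    # Hypothetical usage sketch (assumes an initialized driver instance named
    # ``cam``): drive the internal shutter automatically around each exposure,
    # hold the external shutter permanently open, and use 20 ms open/close
    # times with a TTL-high-to-open signal:
    #     cam.shutter(typ=1, mode=0, ext_closing=20, ext_opening=20, ext_mode=1)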
@Feat(read_once=True)
def shutter_min_times(self):
""" This function will return the minimum opening and closing times in
milliseconds for the shutter on the current camera.
"""
otime, ctime = ct.c_int(), ct.c_int()
self.lib.GetShutterMinTimes(ct.pointer(ctime), ct.pointer(otime))
return (otime.value, ctime.value)
@Feat(read_once=True)
def has_mechanical_shutter(self):
state = ct.c_int()
self.lib.IsInternalMechanicalShutter(ct.pointer(state))
return bool(state.value)
### TEMPERATURE
@Feat(read_once=True, units='degC')
def min_temperature(self):
"""This function returns the valid range of temperatures in centigrads
to which the detector can be cooled.
"""
mini, maxi = ct.c_int(), ct.c_int()
self.lib.GetTemperatureRange(ct.pointer(mini), ct.pointer(maxi))
return mini.value
@Feat(read_once=True, units='degC')
def max_temperature(self):
"""This function returns the valid range of temperatures in centigrads
to which the detector can be cooled.
"""
mini, maxi = ct.c_int(), ct.c_int()
self.lib.GetTemperatureRange(ct.pointer(mini), ct.pointer(maxi))
return maxi.value
@Feat()
def temperature_status(self):
"""This function returns the temperature of the detector to the
nearest degree. It also gives the status of cooling process.
"""
temp = ct.c_float()
ans = self.lib.GetTemperatureF(ct.pointer(temp))
return _ERRORS[ans]
@Feat(units='degC')
def temperature(self):
"""This function returns the temperature of the detector to the
nearest degree. It also gives the status of cooling process.
"""
temp = ct.c_float()
self.lib.GetTemperatureF(ct.pointer(temp))
return temp.value
@Feat(units='degC')
def temperature_setpoint(self):
return self.temperature_sp
@temperature_setpoint.setter
def temperature_setpoint(self, value):
self.temperature_sp = value
value = ct.c_int(int(value))
self.lib.SetTemperature(value)
@Feat(values={True: 1, False: 0})
def cooler_on(self):
state = ct.c_int()
self.lib.IsCoolerOn(ct.pointer(state))
return state.value
@cooler_on.setter
def cooler_on(self, value):
if value:
self.lib.CoolerON()
else:
self.lib.CoolerOFF()
@Feat(values={True: 1, False: 0})
def cooled_on_shutdown(self):
"""This function determines whether the cooler is switched off when
the camera is shut down.
"""
return self.cooled_on_shutdown_value
@cooled_on_shutdown.setter
def cooled_on_shutdown(self, state):
ans = self.lib.SetCoolerMode(ct.c_int(state))
if ans == 20002:
self.cooled_on_shutdown_value = state
@Feat(values={'onfull': 0, 'onlow': 1, 'off': 2})
def fan_mode(self):
"""Allows the user to control the mode of the camera fan. If the
system is cooled, the fan should only be turned off for short periods
of time. During this time the body of the camera will warm up which
could compromise cooling capabilities.
        If the camera body reaches too high a temperature (camera dependent),
the buzzer will sound. If this happens, turn off the external power
supply and allow the system to stabilize before continuing.
"""
return self.fan_mode_index
@fan_mode.setter
def fan_mode(self, mode):
ans = self.lib.SetFanMode(ct.c_int(mode))
if ans == 20002:
self.fan_mode_index = mode
### FILTERS
@Feat()
def averaging_factor(self):
"""Averaging factor to be used with the recursive filter. For
information on the various data averaging filters available see
DATA AVERAGING FILTERS in the Special Guides section of the manual.
"""
af = ct.c_uint()
self.lib.Filter_GetAveragingFactor(ct.pointer(af))
return af.value
@averaging_factor.setter
def averaging_factor(self, value):
self.lib.Filter_SetAveragingFactor(ct.c_uint(value))
@Feat()
def averaging_frame_count(self):
"""Number of frames to be used when using the frame averaging filter.
"""
fc = ct.c_uint()
self.lib.Filter_GetAveragingFrameCount(ct.pointer(fc))
return fc.value
@averaging_frame_count.setter
def averaging_frame_count(self, value):
self.lib.Filter_SetAveragingFrameCount(ct.c_uint(value))
@Feat(values={'NAF': 0, 'RAF': 5, 'FAF': 6})
def averaging_mode(self):
"""Current averaging mode.
Valid options are:
0 – No Averaging Filter
5 – Recursive Averaging Filter
6 – Frame Averaging Filter
"""
i = ct.c_int()
self.lib.Filter_GetDataAveragingMode(ct.pointer(i))
return i.value
@averaging_mode.setter
def averaging_mode(self, value):
self.lib.Filter_SetDataAveragingMode(ct.c_int(value))
@Feat(values={'NF': 0, 'MF': 1, 'LAF': 2, 'IRF': 3, 'NTF': 4})
def noise_filter_mode(self):
"""Set the Noise Filter to use; For information on the various
spurious noise filters available see SPURIOUS NOISE FILTERS in the
Special Guides section of the manual.
Valid options are:
        0 – No Filter
1 – Median Filter
2 – Level Above Filter
3 – Interquartile Range Filter
4 – Noise Threshold Filter
"""
i = ct.c_uint()
self.lib.Filter_GetMode(ct.pointer(i))
return i.value
@noise_filter_mode.setter
def noise_filter_mode(self, value):
self.lib.Filter_SetMode(ct.c_uint(value))
@Feat()
def filter_threshold(self):
"""Sets the threshold value for the Noise Filter. For information on
the various spurious noise filters available see SPURIOUS NOISE FILTERS
in the Special Guides section of the manual.
Valid values are:
        0 – 65535 for the Level Above filter
0 – 10 for all other filters.
"""
f = ct.c_float()
self.lib.Filter_GetThreshold(ct.pointer(f))
return f.value
@filter_threshold.setter
def filter_threshold(self, value):
self.lib.Filter_SetThreshold(ct.c_float(value))
@Feat(values={True: 2, False: 0})
def cr_filter_enabled(self):
"""This function will set the state of the cosmic ray filter mode for
future acquisitions. If the filter mode is on, consecutive scans in an
accumulation will be compared and any cosmic ray-like features that are
only present in one scan will be replaced with a scaled version of the
corresponding pixel value in the correct scan.
"""
i = ct.c_int()
self.lib.GetFilterMode(ct.pointer(i))
return i.value
@cr_filter_enabled.setter
def cr_filter_enabled(self, value):
self.lib.SetFilterMode(ct.c_int(value))
### PHOTON COUNTING MODE
@Feat(values={True: 1, False: 0}) # FIXME: untested
def photon_counting_mode(self):
"""This function activates the photon counting option.
"""
return self.photon_counting_mode_state
@photon_counting_mode.setter
def photon_counting_mode(self, state):
ans = self.lib.SetPhotonCounting(ct.c_int(state))
if ans == 20002:
self.photon_counting_mode_state = state
@Feat(read_once=True)
def n_photon_counting_div(self):
"""Available in some systems is photon counting mode. This function
gets the number of photon counting divisions available. The functions
SetPhotonCounting and SetPhotonCountingThreshold can be used to specify
which of these divisions is to be used.
"""
inti = ct.c_ulong()
self.lib.GetNumberPhotonCountingDivisions(ct.pointer(inti))
return inti.value
@Action() # untested
def set_photon_counting_divs(self, n, thres):
"""This function sets the thresholds for the photon counting option.
"""
thres = ct.c_long(thres)
self.lib.SetPhotonCountingDivisions(ct.c_ulong(n), ct.pointer(thres))
@Action()
def set_photon_counting_thres(self, mini, maxi):
"""This function sets the minimum and maximum threshold in counts
(1-65535) for the photon counting option.
"""
self.lib.SetPhotonCountingThreshold(ct.c_long(mini), ct.c_long(maxi))
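    # Minimal sketch (hypothetical threshold values, initialized instance
    # ``cam``): enable photon counting and count events between two levels:
    #     cam.photon_counting_mode = True
    #     cam.set_photon_counting_thres(200, 65535)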
### FAST KINETICS MODE
@Feat(units='s')
def FK_exposure_time(self):
"""This function will return the current “valid” exposure time for a
fast kinetics acquisition. This function should be used after all the
acquisitions settings have been set, i.e. SetFastKinetics and
SetFKVShiftSpeed. The value returned is the actual time used in
subsequent acquisitions.
"""
f = ct.c_float()
self.lib.GetFKExposureTime(ct.pointer(f))
return f.value
### ACQUISITION HANDLING
@Feat(values={'Single Scan': 1, 'Accumulate': 2, 'Kinetics': 3,
'Fast Kinetics': 4, 'Run till abort': 5})
def acquisition_mode(self):
"""This function will set the acquisition mode to be used on the next
StartAcquisition.
NOTE: In Mode 5 the system uses a “Run Till Abort” acquisition mode. In
Mode 5 only, the camera continually acquires data until the
AbortAcquisition function is called. By using the SetDriverEvent
function you will be notified as each acquisition is completed.
"""
return self.acq_mode
@acquisition_mode.setter
def acquisition_mode(self, mode):
ans = self.lib.SetAcquisitionMode(ct.c_int(mode))
if ans == 20002:
self.acq_mode = mode
@Action()
def prepare_acquisition(self):
"""This function reads the current acquisition setup and allocates and
configures any memory that will be used during the acquisition. The
function call is not required as it will be called automatically by the
StartAcquisition function if it has not already been called externally.
However for long kinetic series acquisitions the time to allocate and
configure any memory can be quite long which can result in a long delay
between calling StartAcquisition and the acquisition actually
commencing. For iDus, there is an additional delay caused by the camera
being set-up with any new acquisition parameters. Calling
PrepareAcquisition first will reduce this delay in the StartAcquisition
call.
"""
self.lib.PrepareAcquisition()
@Action()
def start_acquisition(self):
"""This function starts an acquisition. The status of the acquisition
can be monitored via GetStatus().
"""
self.lib.StartAcquisition()
@Action()
def abort_acquisition(self):
"""This function aborts the current acquisition if one is active
"""
self.lib.AbortAcquisition()
@Action()
def wait_for_acquisition(self):
"""WaitForAcquisition can be called after an acquisition is started
using StartAcquisition to put the calling thread to sleep until an
Acquisition Event occurs. This can be used as a simple alternative to
the functionality provided by the SetDriverEvent function, as all Event
creation and handling is performed internally by the SDK library.
Like the SetDriverEvent functionality it will use less processor
resources than continuously polling with the GetStatus function. If you
wish to restart the calling thread without waiting for an Acquisition
event, call the function CancelWait.
An Acquisition Event occurs each time a new image is acquired during an
Accumulation, Kinetic Series or Run-Till-Abort acquisition or at the
end of a Single Scan Acquisition.
If a second event occurs before the first one has been acknowledged,
the first one will be ignored. Care should be taken in this case, as
you may have to use CancelWait to exit the function.
"""
self.lib.WaitForAcquisition()
@Action()
def cancel_wait(self):
"""This function restarts a thread which is sleeping within the
WaitForAcquisition function. The sleeping thread will return from
WaitForAcquisition with a value not equal to DRV_SUCCESS.
"""
self.lib.CancelWait()
@Feat()
def acquisition_progress(self):
"""This function will return information on the progress of the
current acquisition. It can be called at any time but is best used in
conjunction with SetDriverEvent.
The values returned show the number of completed scans in the current
acquisition. If 0 is returned for both accum and series then either:
- No acquisition is currently running
- The acquisition has just completed
- The very first scan of an acquisition has just started and not yet
completed.
GetStatus can be used to confirm if the first scan has just started,
returning DRV_ACQUIRING, otherwise it will return DRV_IDLE.
        For example, if accum=2 and series=3 then the acquisition has completed
        3 scans in the series and 2 accumulations in the 4th scan of the series.
"""
acc = ct.c_long()
series = ct.c_long()
self.lib.GetAcquisitionProgress(ct.pointer(acc), ct.pointer(series))
return acc.value, series.value
@Feat()
def status(self):
"""This function will return the current status of the Andor SDK
system. This function should be called before an acquisition is started
to ensure that it is IDLE and during an acquisition to monitor the
process.
"""
st = ct.c_int()
self.lib.GetStatus(ct.pointer(st))
if st.value == 20073:
return 'Camera is idle, waiting for instructions.'
elif st.value == 20074:
return 'Camera is executing the temperature cycle.'
elif st.value == 20072:
return 'Acquisition in progress.'
elif st.value == 20023:
return 'Unable to meet accumulate cycle time.'
elif st.value == 20022:
return 'Unable to meet kinetic cycle time.'
elif st.value == 20013:
return 'Unable to communicate with card.'
elif st.value == 20018:
return ('Computer unable to read the data via the ISA slot at the '
'required rate.')
elif st.value == 20026:
return 'Overflow of the spool buffer.'
@Feat()
def n_exposures_in_ring(self):
"""Gets the number of exposures in the ring at this moment."""
n = ct.c_int()
self.lib.GetNumberRingExposureTimes(ct.pointer(n))
return n.value
@Feat()
def buffer_size(self):
"""This function will return the maximum number of images the circular
buffer can store based on the current acquisition settings.
"""
n = ct.c_long()
self.lib.GetSizeOfCircularBuffer(ct.pointer(n))
return n.value
@Feat(values={True: 1, False: 0})
def exposing(self):
"""This function will return if the system is exposing or not. The
status of the firepulse will be returned.
NOTE This is only supported by the CCI23 card.
"""
i = ct.c_int()
self.lib.GetCameraEventStatus(ct.pointer(i))
return i.value
@Feat()
def n_images_acquired(self):
"""This function will return the total number of images acquired since
the current acquisition started. If the camera is idle the value
returned is the number of images acquired during the last acquisition.
"""
n = ct.c_long()
self.lib.GetTotalNumberImagesAcquired(ct.pointer(n))
return n.value
@Action()
def set_image(self, shape=None, binned=(1, 1), p_0=(1, 1)):
"""This function will set the horizontal and vertical binning to be
used when taking a full resolution image.
:param hbin: number of pixels to bin horizontally.
:param vbin: number of pixels to bin vertically.
:param hstart: Start column (inclusive).
:param hend: End column (inclusive).
:param vstart: Start row (inclusive).
:param vend: End row (inclusive).
"""
if shape is None:
shape = self.detector_shape
(hbin, vbin) = binned
(hstart, vstart) = p_0
(hend, vend) = (p_0[0] + shape[0] - 1, p_0[1] + shape[1] - 1)
self.lib.SetImage(ct.c_int(hbin), ct.c_int(vbin),
ct.c_int(hstart), ct.c_int(hend),
ct.c_int(vstart), ct.c_int(vend))
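    # Example sketch (assumes an initialized instance ``cam``): read a
    # 256x256-pixel sub-image starting at pixel (1, 1) with 2x2 binning:
    #     cam.readout_mode = 'Image'
    #     cam.set_image(shape=(256, 256), binned=(2, 2), p_0=(1, 1))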
@Feat(values={'FVB': 0, 'Multi-Track': 1, 'Random-Track': 2,
'Single-Track': 3, 'Image': 4})
def readout_mode(self):
"""This function will set the readout mode to be used on the subsequent
acquisitions.
"""
return self.readout_mode_mode
@readout_mode.setter
def readout_mode(self, mode):
ans = self.lib.SetReadMode(ct.c_int(mode))
if ans == 20002:
self.readout_mode_mode = mode
@Feat(values={True: 1, False: 0})
def readout_packing(self):
"""This function will configure whether data is packed into the readout
register to improve frame rates for sub-images.
Note: It is important to ensure that no light falls outside of the
sub-image area otherwise the acquired data will be corrupted. Only
currently available on iXon+ and iXon3.
"""
return self.readout_packing_state
@readout_packing.setter
def readout_packing(self, state):
ans = self.lib.SetReadoutRegisterPacking(ct.c_int(state))
if ans == 20002:
self.readout_packing_state = state
### DATA HANDLING
@Feat(read_once=True)
def min_image_length(self):
"""This function will return the minimum number of pixels that can be
read out from the chip at each exposure. This minimum value arises due
the way in which the chip is read out and will limit the possible sub
image dimensions and binning sizes that can be applied.
"""
# Will contain the minimum number of super pixels on return.
px = ct.c_int()
self.lib.GetMinimumImageLength(ct.pointer(px))
return px.value
@Action()
def free_int_mem(self):
"""The FreeInternalMemory function will deallocate any memory used
internally to store the previously acquired data. Note that once this
        function has been called, data from the last acquisition cannot be
        retrieved.
"""
self.lib.FreeInternalMemory()
def acquired_data(self, shape):
"""This function will return the data from the last acquisition. The
data are returned as long integers (32-bit signed integers). The
“array” must be large enough to hold the complete data set.
"""
size = np.array(shape).prod()
arr = np.ascontiguousarray(np.zeros(size, dtype=np.int32))
self.lib.GetAcquiredData(arr.ctypes.data_as(ct.POINTER(ct.c_int32)),
ct.c_ulong(size))
arr = arr.reshape(shape)
return arr
def acquired_data16(self, shape):
"""16-bit version of the GetAcquiredData function. The “array” must be
large enough to hold the complete data set.
"""
size = np.array(shape).prod()
arr = np.ascontiguousarray(np.zeros(size, dtype=np.int16))
self.lib.GetAcquiredData16(arr.ctypes.data_as(ct.POINTER(ct.c_int16)),
ct.c_ulong(size))
return arr.reshape(shape)
def oldest_image(self, shape):
"""This function will update the data array with the oldest image in
the circular buffer. Once the oldest image has been retrieved it no
longer is available. The data are returned as long integers (32-bit
signed integers). The "array" must be exactly the same size as the full
image.
"""
size = np.array(shape).prod()
array = np.ascontiguousarray(np.zeros(size, dtype=np.int32))
self.lib.GetOldestImage(array.ctypes.data_as(ct.POINTER(ct.c_int32)),
ct.c_ulong(size))
return array.reshape(shape)
def oldest_image16(self, shape):
"""16-bit version of the GetOldestImage function.
"""
size = np.array(shape).prod()
array = np.ascontiguousarray(np.zeros(size, dtype=np.int16))
self.lib.GetOldestImage16(array.ctypes.data_as(ct.POINTER(ct.c_int16)),
ct.c_ulong(size))
return array.reshape(shape)
def most_recent_image(self, shape):
"""This function will update the data array with the most recently
acquired image in any acquisition mode. The data are returned as long
integers (32-bit signed integers). The "array" must be exactly the same
size as the complete image.
"""
size = np.array(shape).prod()
arr = np.ascontiguousarray(np.zeros(size, dtype=np.int32))
self.lib.GetMostRecentImage(arr.ctypes.data_as(ct.POINTER(ct.c_int32)),
ct.c_ulong(size))
return arr.reshape(shape)
def most_recent_image16(self, shape):
"""16-bit version of the GetMostRecentImage function.
"""
size = np.array(shape).prod()
arr = np.ascontiguousarray(np.zeros(size, dtype=np.int16))
pt = ct.POINTER(ct.c_int16)
self.lib.GetMostRecentImage16(arr.ctypes.data_as(pt), ct.c_ulong(size))
return arr.reshape(shape)
def images(self, first, last, shape, validfirst, validlast):
"""This function will update the data array with the specified series
of images from the circular buffer. If the specified series is out of
range (i.e. the images have been overwritten or have not yet been
acquired) then an error will be returned.
        :param first: index of the first image in the buffer to retrieve.
        :param last: index of the last image in the buffer to retrieve.
        :param shape: shape (rows, columns) of a single full image.
        :param validfirst: index of the first valid image.
        :param validlast: index of the last valid image.
"""
size = shape[0] * shape[1] * (1 + last - first)
array = np.ascontiguousarray(np.zeros(size, dtype=np.int32))
self.lib.GetImages(ct.c_long(first), ct.c_long(last),
array.ctypes.data_as(ct.POINTER(ct.c_int32)),
ct.c_ulong(size), ct.pointer(ct.c_long(validfirst)),
ct.pointer(ct.c_long(validlast)))
return array.reshape(-1, shape[0], shape[1])
def images16(self, first, last, shape, validfirst, validlast):
"""16-bit version of the GetImages function.
"""
size = shape[0] * shape[1] * (1 + last - first)
array = np.ascontiguousarray(np.zeros(size, dtype=np.int16))
self.lib.GetImages16(ct.c_long(first), ct.c_long(last),
array.ctypes.data_as(ct.POINTER(ct.c_int16)),
ct.c_ulong(size),
ct.pointer(ct.c_long(validfirst)),
ct.pointer(ct.c_long(validlast)))
return array.reshape(-1, shape[0], shape[1])
@Feat()
def new_images_index(self):
"""This function will return information on the number of new images
(i.e. images which have not yet been retrieved) in the circular buffer.
This information can be used with GetImages to retrieve a series of the
latest images. If any images are overwritten in the circular buffer
they can no longer be retrieved and the information returned will treat
overwritten images as having been retrieved.
"""
first = ct.c_long()
last = ct.c_long()
self.lib.GetNumberNewImages(ct.pointer(first), ct.pointer(last))
return (first.value, last.value)
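    # Sketch of draining the circular buffer during a 'Run till abort'
    # acquisition (hypothetical, assumes an acquiring instance ``cam``):
    #     first, last = cam.new_images_index
    #     stack = cam.images16(first, last, cam.detector_shape, first, last)
    # which returns the frames not yet retrieved as a
    # (last - first + 1, rows, columns) array.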
@Feat() # TODO: test this
def available_images_index(self):
"""This function will return information on the number of available
images in the circular buffer. This information can be used with
GetImages to retrieve a series of images. If any images are overwritten
in the circular buffer they no longer can be retrieved and the
information returned will treat overwritten images as not available.
"""
first = ct.c_long()
last = ct.c_long()
self.lib.GetNumberAvailableImages(ct.pointer(first), ct.pointer(last))
return (first.value, last.value)
def set_dma_parameters(self, n_max_images, s_per_dma):
"""In order to facilitate high image readout rates the controller card
may wait for multiple images to be acquired before notifying the SDK
that new data is available. Without this facility, there is a chance
that hardware interrupts may be lost as the operating system does not
have enough time to respond to each interrupt. The drawback to this is
that you will not get the data for an image until all images for that
interrupt have been acquired.
There are 3 settings involved in determining how many images will be
acquired for each notification (DMA Interrupt) of the controller card
and they are as follows:
1. The size of the DMA buffer gives an upper limit on the number of
images that can be stored within it and is usually set to the size
of one full image when installing the software. This will usually
mean that if you acquire full frames there will never be more than
one image per DMA.
2. A second setting that is used is the minimum amount of time
(SecondsPerDMA) that should expire between interrupts. This can be
        used to give an indication of the responsiveness of the operating
        system to interrupts. Decreasing this value will allow more
        interrupts per second and should only be done for faster PCs. The
        default value is 0.03s (30ms); finding the optimal value for your
        PC can only be done through experimentation.
        3. The third setting is an override to the number of images
        calculated using the previous settings. If the number of images per
        DMA is calculated to be greater than MaxImagesPerDMA then it will
be reduced to MaxImagesPerDMA. This can be used to, for example,
ensure that there is never more than 1 image per DMA by setting
MaxImagesPerDMA to 1. Setting MaxImagesPerDMA to zero removes this
limit. Care should be taken when modifying these parameters as
missed interrupts may prevent the acquisition from completing.
"""
self.lib.SetDMAParameters(ct.c_int(n_max_images),
ct.c_float(s_per_dma))
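    # Example sketch (hypothetical values): force at most one image per DMA
    # transaction, with interrupts no more often than every 10 ms:
    #     cam.set_dma_parameters(1, 0.01)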
@Feat()
def max_images_per_dma(self):
"""This function will return the maximum number of images that can be
transferred during a single DMA transaction.
"""
n = ct.c_ulong()
self.lib.GetImagesPerDMA(ct.pointer(n))
return n.value
@Action()
def save_raw(self, filename, typ):
"""This function saves the last acquisition as a raw data file.
See self.savetypes for the file type keys.
"""
self.lib.SaveAsRaw(ct.c_char_p(str.encode(filename)),
ct.c_int(self.savetypes[typ]))
### EXPOSURE SETTINGS
@Feat()
def acquisition_timings(self):
"""This function will return the current “valid” acquisition timing
information. This function should be used after all the acquisitions
settings have been set, e.g. SetExposureTime, SetKineticCycleTime and
SetReadMode etc. The values returned are the actual times used in
subsequent acquisitions.
This function is required as it is possible to set the exposure time to
20ms, accumulate cycle time to 30ms and then set the readout mode to
full image. As it can take 250ms to read out an image it is not
possible to have a cycle time of 30ms.
All data is measured in seconds.
"""
exp = ct.c_float()
accum = ct.c_float()
kine = ct.c_float()
self.lib.GetAcquisitionTimings(ct.pointer(exp), ct.pointer(accum),
ct.pointer(kine))
return exp.value * seg, accum.value * seg, kine.value * seg
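    # Typical usage sketch (assumes an initialized instance ``cam``; ``seg``
    # is the seconds unit used in this module): request an exposure, then read
    # back the timings the camera will actually use:
    #     cam.set_exposure_time(0.02 * seg)
    #     exposure, accumulate, kinetic = cam.acquisition_timings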
@Action()
def set_exposure_time(self, time):
"""This function will set the exposure time to the nearest valid value
not less than the given value, in seconds. The actual exposure time
used is obtained by GetAcquisitionTimings. Please refer to
SECTION 5 – ACQUISITION MODES for further information.
"""
try:
time.magnitude
except AttributeError:
time = time * seg
self.lib.SetExposureTime(ct.c_float(time.magnitude))
@Action()
def set_accum_time(self, time):
"""This function will set the accumulation cycle time to the nearest
valid value not less than the given value. The actual cycle time used
is obtained by GetAcquisitionTimings. Please refer to
SECTION 5 – ACQUISITION MODES for further information.
"""
try:
time.magnitude
except AttributeError:
time = time * seg
self.lib.SetAccumulationCycleTime(ct.c_float(time.magnitude))
@Action()
def set_kinetic_cycle_time(self, time):
"""This function will set the kinetic cycle time to the nearest valid
value not less than the given value. The actual time used is obtained
        by GetAcquisitionTimings. Please refer to
        SECTION 5 – ACQUISITION MODES for further information.
        :param time: the kinetic cycle time in seconds.
"""
try:
time.magnitude
except AttributeError:
time = time * seg
self.lib.SetKineticCycleTime(ct.c_float(time.magnitude))
@Action()
def set_n_kinetics(self, n):
"""This function will set the number of scans (possibly accumulated
scans) to be taken during a single acquisition sequence. This will only
take effect if the acquisition mode is Kinetic Series.
"""
self.lib.SetNumberKinetics(ct.c_int(n))
@Action()
def set_n_accum(self, n):
"""This function will set the number of scans accumulated in memory.
This will only take effect if the acquisition mode is either Accumulate
or Kinetic Series.
"""
self.lib.SetNumberAccumulations(ct.c_int(n))
@Feat(units='s')
def keep_clean_time(self):
"""This function will return the time to perform a keep clean cycle.
This function should be used after all the acquisitions settings have
been set, e.g. SetExposureTime, SetKineticCycleTime and SetReadMode
etc. The value returned is the actual times used in subsequent
acquisitions.
"""
time = ct.c_float()
self.lib.GetKeepCleanTime(ct.pointer(time))
return time.value
@Feat(units='s')
def readout_time(self):
"""This function will return the time to readout data from a sensor.
This function should be used after all the acquisitions settings have
been set, e.g. SetExposureTime, SetKineticCycleTime and SetReadMode
etc. The value returned is the actual times used in subsequent
acquisitions.
"""
time = ct.c_float()
self.lib.GetReadOutTime(ct.pointer(time))
return time.value
@Feat(read_once=True, units='s')
def max_exposure(self):
"""This function will return the maximum Exposure Time in seconds that
is settable by the SetExposureTime function.
"""
exp = ct.c_float()
self.lib.GetMaximumExposure(ct.pointer(exp))
return exp.value
@Feat(read_once=True)
def n_max_nexposure(self):
"""This function will return the maximum number of exposures that can
be configured in the SetRingExposureTimes SDK function.
"""
n = ct.c_int()
self.lib.GetMaximumNumberRingExposureTimes(ct.pointer(n))
return n.value
    def true_exposure_times(self, n):
        """This function will return the actual exposure times that the camera
        will use. There may be differences between requested exposures and the
        actual exposures.
        :param n: number of requested exposure times.
        """
        # The SDK fills a buffer of 32-bit floats, so use float32 here.
        times = np.ascontiguousarray(np.zeros(n, dtype=np.float32))
outtimes = times.ctypes.data_as(ct.POINTER(ct.c_float))
self.lib.GetAdjustedRingExposureTimes(ct.c_int(n), outtimes)
return times
    def exposure_times(self, value):
        """Set the ring exposure times (in seconds) to be used in subsequent
        acquisitions. The SDK expects 32-bit floats.
        """
        n = ct.c_int(len(value))
        value = np.ascontiguousarray(value, dtype=np.float32)
outvalue = value.ctypes.data_as(ct.POINTER(ct.c_float))
self.lib.SetRingExposureTimes(n, outvalue)
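    # Sketch (hypothetical values): program a ring of three exposure times and
    # read back the adjusted values the camera will actually apply:
    #     cam.exposure_times(np.array([0.01, 0.02, 0.05]))
    #     actual = cam.true_exposure_times(3)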
@Feat(values={True: 1, False: 0})
def frame_transfer_mode(self):
"""This function will set whether an acquisition will readout in Frame
Transfer Mode. If the acquisition mode is Single Scan or Fast Kinetics
this call will have no affect.
"""
return self.frame_transfer_mode_state
@frame_transfer_mode.setter
def frame_transfer_mode(self, state):
ans = self.lib.SetFrameTransferMode(ct.c_int(state))
if ans == 20002:
self.frame_transfer_mode_state = state
### AMPLIFIERS, GAIN, SPEEDS
@Feat(read_once=True)
def n_preamps(self):
"""Available in some systems are a number of pre amp gains that can be
applied to the data as it is read out. This function gets the number of
these pre amp gains available. The functions GetPreAmpGain and
SetPreAmpGain can be used to specify which of these gains is to be
used.
"""
n = ct.c_int()
self.lib.GetNumberPreAmpGains(ct.pointer(n))
return n.value
def preamp_available(self, channel, amp, index, preamp):
"""This function checks that the AD channel exists, and that the
amplifier, speed and gain are available for the AD channel.
"""
channel = ct.c_int(channel)
amp = ct.c_int(amp)
index = ct.c_int(index)
preamp = ct.c_int(preamp)
status = ct.c_int()
self.lib.IsPreAmpGainAvailable(channel, amp, index, preamp,
ct.pointer(status))
return bool(status.value)
def preamp_descr(self, index):
"""This function will return a string with a pre amp gain description.
The pre amp gain is selected using the index. The SDK has a string
associated with each of its pre amp gains. The maximum number of
characters needed to store the pre amp gain descriptions is 30. The
user has to specify the number of characters they wish to have returned
to them from this function.
"""
index = ct.c_int(index)
descr = (ct.c_char * 30)()
leng = ct.c_int(30)
        self.lib.GetPreAmpGainText(index, ct.pointer(descr), leng)
return str(descr.value)[2:-1]
def true_preamp(self, index):
"""For those systems that provide a number of pre amp gains to apply
to the data as it is read out; this function retrieves the amount of
gain that is stored for a particular index. The number of gains
available can be obtained by calling the GetNumberPreAmpGains function
and a specific Gain can be selected using the function SetPreAmpGain.
"""
index = ct.c_int(index)
gain = ct.c_float()
self.lib.GetPreAmpGain(index, ct.pointer(gain))
return gain.value
@Feat()
def preamp(self):
"""This function will set the pre amp gain to be used for subsequent
acquisitions. The actual gain factor that will be applied can be found
through a call to the GetPreAmpGain function.
The number of Pre Amp Gains available is found by calling the
GetNumberPreAmpGains function.
"""
return self.preamp_index
@preamp.setter
def preamp(self, index):
self.preamp_index = index
self.lib.SetPreAmpGain(ct.c_int(index))
@Feat(values={True: 1, False: 0})
def EM_advanced_enabled(self):
"""This function turns on and off access to higher EM gain levels
within the SDK. Typically, optimal signal to noise ratio and dynamic
range is achieved between x1 to x300 EM Gain.
Higher gains of > x300 are recommended for single photon counting only.
Before using higher levels, you should ensure that light levels do not
exceed the regime of tens of photons per pixel, otherwise accelerated
ageing of the sensor can occur.
This is set to False upon initialization of the camera.
"""
state = ct.c_int()
self.lib.GetEMAdvanced(ct.pointer(state))
return state.value
@EM_advanced_enabled.setter
def EM_advanced_enabled(self, value):
self.lib.SetEMAdvanced(ct.c_int(value))
@Feat(values={'DAC255': 0, 'DAC4095': 1, 'Linear': 2, 'RealGain': 3})
def EM_gain_mode(self):
"""Set the EM Gain mode to one of the following possible settings.
Mode 0: The EM Gain is controlled by DAC settings in the range
0-255. Default mode.
1: The EM Gain is controlled by DAC settings in the range 0-4095.
2: Linear mode.
3: Real EM gain
"""
return self.EM_gain_mode_index
@EM_gain_mode.setter
def EM_gain_mode(self, mode):
ans = self.lib.SetEMGainMode(ct.c_int(mode))
if ans == 20002:
self.EM_gain_mode_index = mode
@Feat()
def EM_gain(self):
"""Allows the user to change the gain value. The valid range for the
gain depends on what gain mode the camera is operating in. See
SetEMGainMode to set the mode and GetEMGainRange to get the valid range
to work with. To access higher gain values (>x300) see SetEMAdvanced.
"""
gain = ct.c_int()
self.lib.GetEMCCDGain(ct.pointer(gain))
return gain.value
@EM_gain.setter
def EM_gain(self, value):
self.lib.SetEMCCDGain(ct.c_int(value))
@Feat()
def EM_gain_range(self):
"""Returns the minimum and maximum values of the current selected EM
Gain mode and temperature of the sensor.
"""
mini, maxi = ct.c_int(), ct.c_int()
self.lib.GetEMGainRange(ct.pointer(mini), ct.pointer(maxi))
return (mini.value, maxi.value)
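    # EM gain workflow sketch (hypothetical values, EMCCD cameras only):
    #     cam.EM_gain_mode = 'RealGain'
    #     low, high = cam.EM_gain_range
    #     cam.EM_gain = min(300, high)
    # which keeps the requested gain inside the range reported for the current
    # mode and sensor temperature.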
@Feat(read_once=True)
def n_ad_channels(self):
n = ct.c_int()
self.lib.GetNumberADChannels(ct.pointer(n))
return n.value
@Feat(read_once=True)
def n_amps(self):
n = ct.c_int()
self.lib.GetNumberAmp(ct.pointer(n))
return n.value
def amp_available(self, iamp):
"""This function checks if the hardware and current settings permit
the use of the specified amplifier."""
ans = self.lib.IsAmplifierAvailable(ct.c_int(iamp))
if ans == 20002:
return True
else:
return False
def amp_descr(self, index):
"""This function will return a string with an amplifier description.
The amplifier is selected using the index. The SDK has a string
associated with each of its amplifiers. The maximum number of
characters needed to store the amplifier descriptions is 21. The user
has to specify the number of characters they wish to have returned to
them from this function.
"""
index = ct.c_int(index)
descr = (ct.c_char * 21)()
leng = ct.c_int(21)
self.lib.GetAmpDesc(index, ct.pointer(descr), leng)
return str(descr.value)[2:-1]
def readout_flipped(self, iamp):
"""On cameras with multiple amplifiers the frame readout may be
flipped. This function can be used to determine if this is the case.
"""
flipped = ct.c_int()
self.lib.IsReadoutFlippedByAmplifier(ct.c_int(iamp),
ct.pointer(flipped))
return bool(flipped.value)
def amp_max_hspeed(self, index):
"""This function will return the maximum available horizontal shift
speed for the amplifier selected by the index parameter.
"""
hspeed = ct.c_float()
self.lib.GetAmpMaxSpeed(ct.c_int(index), ct.pointer(hspeed))
return hspeed.value
def n_horiz_shift_speeds(self, channel=0, typ=None):
"""As your Andor SDK system is capable of operating at more than one
horizontal shift speed this function will return the actual number of
speeds available.
:param channel: the AD channel.
:param typ: output amplification. 0 electron multiplication. 1 conventional.
"""
if typ is None:
typ = self.amp_typ
n = ct.c_int()
self.lib.GetNumberHSSpeeds(ct.c_int(channel),
ct.c_int(typ), ct.pointer(n))
return n.value
def true_horiz_shift_speed(self, index=0, typ=None, ad=0):
"""As your Andor system is capable of operating at more than one
horizontal shift speed this function will return the actual speeds
available. The value returned is in MHz.
GetHSSpeed(int channel, int typ, int index, float* speed)
:param typ: output amplification.
0 electron multiplication/Conventional(clara)
1 conventional/Extended NIR Mode(clara).
:param index: speed required
0 to NumberSpeeds-1 where NumberSpeeds is value returned in
first parameter after a call to GetNumberHSSpeeds().
:param ad: the AD channel.
"""
if typ is None:
typ = self.amp_typ
speed = ct.c_float()
self.lib.GetHSSpeed(ct.c_int(ad), ct.c_int(typ), ct.c_int(index),
ct.pointer(speed))
return speed.value * MHz
@Feat()
def horiz_shift_speed(self):
return self.horiz_shift_speed_index
@horiz_shift_speed.setter
def horiz_shift_speed(self, index):
"""This function will set the speed at which the pixels are shifted
into the output node during the readout phase of an acquisition.
Typically your camera will be capable of operating at several
horizontal shift speeds. To get the actual speed that an index
corresponds to use the GetHSSpeed function.
:param typ: output amplification.
0 electron multiplication/Conventional(clara).
1 conventional/Extended NIR mode(clara).
:param index: the horizontal speed to be used
0 to GetNumberHSSpeeds() - 1
"""
ans = self.lib.SetHSSpeed(ct.c_int(self.amp_typ), ct.c_int(index))
if ans == 20002:
self.horiz_shift_speed_index = index
@Feat()
def fastest_recommended_vsspeed(self):
"""As your Andor SDK system may be capable of operating at more than
one vertical shift speed this function will return the fastest
recommended speed available. The very high readout speeds, may require
an increase in the amplitude of the Vertical Clock Voltage using
SetVSAmplitude. This function returns the fastest speed which does not
require the Vertical Clock Voltage to be adjusted. The values returned
are the vertical shift speed index and the actual speed in microseconds
per pixel shift.
"""
inti, f2 = ct.c_int(), ct.c_float()
self.lib.GetFastestRecommendedVSSpeed(ct.pointer(inti), ct.pointer(f2))
return (inti.value, f2.value)
@Feat(read_once=True)
def n_vert_clock_amps(self):
"""This function will normally return the number of vertical clock
voltage amplitudes that the camera has.
"""
n = ct.c_int()
self.lib.GetNumberVSAmplitudes(ct.pointer(n))
return n.value
def vert_amp_index(self, string):
"""This Function is used to get the index of the Vertical Clock
Amplitude that corresponds to the string passed in.
:param string: "Normal" , "+1" , "+2" , "+3" , "+4"
"""
index = ct.c_int()
string = ct.c_char_p(str.encode(string))
self.lib.GetVSAmplitudeFromString(string, ct.pointer(index))
return index.value
def vert_amp_string(self, index):
"""This Function is used to get the Vertical Clock Amplitude string
that corresponds to the index passed in.
:param index: Index of VS amplitude required
Valid values 0 to GetNumberVSAmplitudes() - 1
"""
index = ct.c_int(index)
string = (ct.c_char * 6)()
self.lib.GetVSAmplitudeString(index, ct.pointer(string))
return str(string.value)[2:-1]
def true_vert_amp(self, index):
"""This Function is used to get the value of the Vertical Clock
Amplitude found at the index passed in.
:param index: Index of VS amplitude required
Valid values 0 to GetNumberVSAmplitudes() - 1
"""
index = ct.c_int(index)
amp = ct.c_int()
self.lib.GetVSAmplitudeValue(index, ct.pointer(amp))
return amp.value
@Action()
def set_vert_clock(self, index):
"""If you choose a high readout speed (a low readout time), then you
should also consider increasing the amplitude of the Vertical Clock
Voltage.
There are five levels of amplitude available for you to choose from:
- Normal, +1, +2, +3, +4
Exercise caution when increasing the amplitude of the vertical clock
voltage, since higher clocking voltages may result in increased
clock-induced charge (noise) in your signal. In general, only the very
highest vertical clocking speeds are likely to benefit from an
increased vertical clock voltage amplitude.
"""
self.lib.SetVSAmplitude(ct.c_int(index))
@Feat(read_once=True)
def n_vert_shift_speeds(self):
"""As your Andor system may be capable of operating at more than one
vertical shift speed this function will return the actual number of
speeds available.
"""
n = ct.c_int()
self.lib.GetNumberVSSpeeds(ct.pointer(n))
return n.value
def true_vert_shift_speed(self, index=0):
"""As your Andor SDK system may be capable of operating at more than
one vertical shift speed this function will return the actual speeds
available. The value returned is in microseconds.
"""
speed = ct.c_float()
self.lib.GetVSSpeed(ct.c_int(index), ct.pointer(speed))
return speed.value * us
@Feat()
def vert_shift_speed(self):
return self.vert_shift_speed_index
@vert_shift_speed.setter
def vert_shift_speed(self, index):
"""This function will set the vertical speed to be used for subsequent
acquisitions.
"""
self.vert_shift_speed_index = index
self.lib.SetVSSpeed(ct.c_int(index))
### BASELINE
@Feat(values={True: 1, False: 0})
def baseline_clamp(self):
"""This function returns the status of the baseline clamp
functionality. With this feature enabled the baseline level of each
scan in a kinetic series will be more consistent across the sequence.
"""
i = ct.c_int()
self.lib.GetBaselineClamp(ct.pointer(i))
return i.value
@baseline_clamp.setter
def baseline_clamp(self, value):
value = ct.c_int(value)
self.lib.SetBaselineClamp(value)
@Feat(limits=(-1000, 1100, 100))
def baseline_offset(self):
"""This function allows the user to move the baseline level by the
amount selected. For example “+100” will add approximately 100 counts
to the default baseline value. The value entered should be a multiple
of 100 between -1000 and +1000 inclusively.
"""
return self.baseline_offset_value
@baseline_offset.setter
def baseline_offset(self, value):
ans = self.lib.SetBaselineOffset(ct.c_int(value))
if ans == 20002:
self.baseline_offset_value = value
### BIT DEPTH
def bit_depth(self, ch):
"""This function will retrieve the size in bits of the dynamic range
for any available AD channel.
"""
ch = ct.c_int(ch)
depth = ct.c_uint()
self.lib.GetBitDepth(ch, ct.pointer(depth))
return depth.value
### TRIGGER
@Feat(values={True: 1, False: 0})
def adv_trigger_mode(self):
"""This function will set the state for the iCam functionality that
some cameras are capable of. There may be some cases where we wish to
prevent the software using the new functionality and just do it the way
it was previously done.
"""
return self.adv_trigger_mode_state
@adv_trigger_mode.setter
def adv_trigger_mode(self, state):
ans = self.lib.SetAdvancedTriggerModeState(ct.c_int(state))
if ans == 20002:
self.adv_trigger_mode_state = state
def trigger_mode_available(self, modestr):
"""This function checks if the hardware and current settings permit
the use of the specified trigger mode.
"""
index = self.triggers[modestr]
ans = self.lib.IsTriggerModeAvailable(ct.c_int(index))
if ans == 20002:
return True
else:
return False
@Feat(values={'Internal': 0, 'External': 1, 'External Start': 6,
'External Exposure': 7, 'External FVB EM': 9,
'Software Trigger': 10, 'External Charge Shifting': 12})
def trigger_mode(self):
"""This function will set the trigger mode that the camera will
operate in.
"""
return self.trigger_mode_index
@trigger_mode.setter
def trigger_mode(self, mode):
ans = self.lib.SetTriggerMode(ct.c_int(mode))
if ans == 20002:
self.trigger_mode_index = mode
@Action()
def send_software_trigger(self):
"""This function sends an event to the camera to take an acquisition
when in Software Trigger mode. Not all cameras have this mode available
to them. To check if your camera can operate in this mode check the
GetCapabilities function for the Trigger Mode
AC_TRIGGERMODE_CONTINUOUS. If this mode is physically possible and
other settings are suitable (IsTriggerModeAvailable) and the camera is
acquiring then this command will take an acquisition.
NOTES:
The settings of the camera must be as follows:
- ReadOut mode is full image
- RunMode is Run Till Abort
- TriggerMode is 10
"""
self.lib.SendSoftwareTrigger()
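    # Software-trigger sketch following the settings listed above
    # (hypothetical, assumes an initialized instance ``cam``):
    #     cam.readout_mode = 'Image'
    #     cam.acquisition_mode = 'Run till abort'
    #     cam.trigger_mode = 'Software Trigger'
    #     cam.start_acquisition()
    #     cam.send_software_trigger()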
@Action()
def trigger_level(self, value):
"""This function sets the trigger voltage which the system will use.
"""
self.lib.SetTriggerLevel(ct.c_float(value))
### AUXPORT
    @DictFeat(values={True: 1, False: 0}, keys=list(range(1, 5)))
def in_aux_port(self, port):
"""This function returns the state of the TTL Auxiliary Input Port on
the Andor plug-in card.
"""
port = ct.c_int(port)
state = ct.c_int()
self.lib.InAuxPort(port, ct.pointer(state))
return state.value
@DictFeat(values={True: 1, False: 0}, keys=list(range(1, 5)))
def out_aux_port(self, port):
"""This function sets the TTL Auxiliary Output port (P) on the Andor
plug-in card to either ON/HIGH or OFF/LOW.
"""
return self.auxout[port - 1]
@out_aux_port.setter
def out_aux_port(self, port, state):
self.auxout[port - 1] = bool(state)
port = ct.c_int(port)
state = ct.c_int(state)
self.lib.OutAuxPort(port, ct.pointer(state))
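    ### SDK3 (AT_*) WRAPPERS
    # Note: the AT_* helpers below follow the Andor SDK3 (Neo/Zyla) style API
    # and assume a camera handle attribute ``self.AT_H`` that is not created
    # anywhere in this class.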
def is_implemented(self, strcommand):
"""Checks if command is implemented.
"""
result = ct.c_bool()
command = ct.c_wchar_p(strcommand)
self.lib.AT_IsImplemented(self.AT_H, command, ct.addressof(result))
return result.value
def is_writable(self, strcommand):
"""Checks if command is writable.
"""
result = ct.c_bool()
command = ct.c_wchar_p(strcommand)
self.lib.AT_IsWritable(self.AT_H, command, ct.addressof(result))
return result.value
def queuebuffer(self, bufptr, value):
"""Put buffer in queue.
"""
value = ct.c_int(value)
self.lib.AT_QueueBuffer(self.AT_H, ct.byref(bufptr), value)
def waitbuffer(self, ptr, bufsize):
"""Wait for next buffer ready.
"""
timeout = ct.c_int(20000)
self.lib.AT_WaitBuffer(self.AT_H, ct.byref(ptr), ct.byref(bufsize),
timeout)
def command(self, strcommand):
"""Run command.
"""
command = ct.c_wchar_p(strcommand)
self.lib.AT_Command(self.AT_H, command)
def getint(self, strcommand):
"""Run command and get Int return value.
"""
result = ct.c_longlong()
command = ct.c_wchar_p(strcommand)
self.lib.AT_GetInt(self.AT_H, command, ct.addressof(result))
return result.value
def setint(self, strcommand, value):
"""SetInt function.
"""
command = ct.c_wchar_p(strcommand)
value = ct.c_longlong(value)
self.lib.AT_SetInt(self.AT_H, command, value)
def getfloat(self, strcommand):
"""Run command and get Int return value.
"""
result = ct.c_double()
command = ct.c_wchar_p(strcommand)
self.lib.AT_GetFloat(self.AT_H, command, ct.addressof(result))
return result.value
def setfloat(self, strcommand, value):
"""Set command with Float value parameter.
"""
command = ct.c_wchar_p(strcommand)
value = ct.c_double(value)
self.lib.AT_SetFloat(self.AT_H, command, value)
def getbool(self, strcommand):
"""Run command and get Bool return value.
"""
result = ct.c_bool()
command = ct.c_wchar_p(strcommand)
self.lib.AT_GetBool(self.AT_H, command, ct.addressof(result))
return result.value
def setbool(self, strcommand, value):
"""Set command with Bool value parameter.
"""
command = ct.c_wchar_p(strcommand)
value = ct.c_bool(value)
self.lib.AT_SetBool(self.AT_H, command, value)
def getenumerated(self, strcommand):
"""Run command and set Enumerated return value.
"""
result = ct.c_int()
command = ct.c_wchar_p(strcommand)
self.lib.AT_GetEnumerated(self.AT_H, command, ct.addressof(result))
def setenumerated(self, strcommand, value):
"""Set command with Enumerated value parameter.
"""
command = ct.c_wchar_p(strcommand)
value = ct.c_bool(value)
self.lib.AT_SetEnumerated(self.AT_H, command, value)
def setenumstring(self, strcommand, item):
"""Set command with EnumeratedString value parameter.
"""
command = ct.c_wchar_p(strcommand)
item = ct.c_wchar_p(item)
self.lib.AT_SetEnumString(self.AT_H, command, item)
def flush(self):
self.lib.AT_Flush(self.AT_H)
if __name__ == '__main__':
from matplotlib import pyplot as plt
from lantz import Q_
import time
degC = Q_(1, 'degC')
us = Q_(1, 'us')
MHz = Q_(1, 'MHz')
s = Q_(1, 's')
with CCD() as andor:
print(andor.idn)
andor.free_int_mem()
# Acquisition settings
andor.readout_mode = 'Image'
andor.set_image()
# andor.acquisition_mode = 'Single Scan'
andor.acquisition_mode = 'Run till abort'
andor.set_exposure_time(0.03 * s)
andor.trigger_mode = 'Internal'
andor.amp_typ = 0
andor.horiz_shift_speed = 0
andor.vert_shift_speed = 0
andor.shutter(0, 0, 0, 0, 0)
# # Temperature stabilization
# andor.temperature_setpoint = -30 * degC
# andor.cooler_on = True
# stable = 'Temperature has stabilized at set point.'
# print('Temperature set point =', andor.temperature_setpoint)
# while andor.temperature_status != stable:
# print("Current temperature:", np.round(andor.temperature, 1))
# time.sleep(30)
# print('Temperature has stabilized at set point')
# Acquisition
andor.start_acquisition()
time.sleep(2)
data = andor.most_recent_image(shape=andor.detector_shape)
andor.abort_acquisition()
plt.imshow(data, cmap='gray', interpolation='None')
plt.colorbar()
plt.show()
print(data.min(), data.max(), data.mean())
| varses/awsch | lantz/drivers/andor/ccd.py | Python | bsd-3-clause | 74,992 |
import datetime
import httplib2
import itertools
import json
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Sum, Max
import commonware.log
from apiclient.discovery import build
from celeryutils import task
from oauth2client.client import OAuth2Credentials
import amo
import amo.search
from addons.models import Addon, AddonUser
from bandwagon.models import Collection
from lib.es.utils import get_indices
from reviews.models import Review
from stats.models import Contribution
from users.models import UserProfile
from versions.models import Version
from mkt.constants.regions import REGIONS_CHOICES_SLUG
from mkt.monolith.models import MonolithRecord
from mkt.webapps.models import Webapp
from . import search
from .models import (AddonCollectionCount, CollectionCount, CollectionStats,
DownloadCount, ThemeUserCount, UpdateCount)
log = commonware.log.getLogger('z.task')
@task
def addon_total_contributions(*addons, **kw):
"Updates the total contributions for a given addon."
log.info('[%s@%s] Updating total contributions.' %
(len(addons), addon_total_contributions.rate_limit))
# Only count uuid=None; those are verified transactions.
stats = (Contribution.objects.filter(addon__in=addons, uuid=None)
.values_list('addon').annotate(Sum('amount')))
for addon, total in stats:
Addon.objects.filter(id=addon).update(total_contributions=total)
@task
def update_addons_collections_downloads(data, **kw):
log.info("[%s] Updating addons+collections download totals." %
(len(data)))
cursor = connection.cursor()
q = ("UPDATE addons_collections SET downloads=%s WHERE addon_id=%s "
"AND collection_id=%s;" * len(data))
cursor.execute(q,
list(itertools.chain.from_iterable(
[var['sum'], var['addon'], var['collection']]
for var in data)))
transaction.commit_unless_managed()
@task
def update_collections_total(data, **kw):
log.info("[%s] Updating collections' download totals." %
(len(data)))
for var in data:
(Collection.objects.filter(pk=var['collection_id'])
.update(downloads=var['sum']))
def get_profile_id(service, domain):
"""
Fetch the profile ID for the given domain.
"""
accounts = service.management().accounts().list().execute()
account_ids = [a['id'] for a in accounts.get('items', ())]
for account_id in account_ids:
webproperties = service.management().webproperties().list(
accountId=account_id).execute()
webproperty_ids = [p['id'] for p in webproperties.get('items', ())]
for webproperty_id in webproperty_ids:
profiles = service.management().profiles().list(
accountId=account_id,
webPropertyId=webproperty_id).execute()
for p in profiles.get('items', ()):
# sometimes GA includes "http://", sometimes it doesn't.
if '://' in p['websiteUrl']:
name = p['websiteUrl'].partition('://')[-1]
else:
name = p['websiteUrl']
if name == domain:
return p['id']
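# get_profile_id() walks the Analytics hierarchy (accounts -> web properties
# -> profiles) and compares each profile's websiteUrl, minus any scheme,
# against the requested domain; it falls through and returns None when
# nothing matches, which update_google_analytics() below logs as critical
# before aborting.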
@task
def update_google_analytics(date, **kw):
creds_data = getattr(settings, 'GOOGLE_ANALYTICS_CREDENTIALS', None)
if not creds_data:
log.critical('Failed to update global stats: '
'GOOGLE_ANALYTICS_CREDENTIALS not set')
return
creds = OAuth2Credentials(
*[creds_data[k] for k in
('access_token', 'client_id', 'client_secret',
'refresh_token', 'token_expiry', 'token_uri',
'user_agent')])
h = httplib2.Http()
creds.authorize(h)
service = build('analytics', 'v3', http=h)
domain = getattr(settings,
'GOOGLE_ANALYTICS_DOMAIN', None) or settings.DOMAIN
profile_id = get_profile_id(service, domain)
if profile_id is None:
log.critical('Failed to update global stats: could not access a Google'
' Analytics profile for ' + domain)
return
datestr = date.strftime('%Y-%m-%d')
try:
data = service.data().ga().get(ids='ga:' + profile_id,
start_date=datestr,
end_date=datestr,
metrics='ga:visits').execute()
# Storing this under the webtrends stat name so it goes on the
# same graph as the old webtrends data.
p = ['webtrends_DailyVisitors', data['rows'][0][0], date]
except Exception, e:
log.critical(
            'Fetching stats data for %s from Google Analytics failed: %s'
            % (datestr, e))
return
try:
cursor = connection.cursor()
cursor.execute('REPLACE INTO global_stats (name, count, date) '
'values (%s, %s, %s)', p)
transaction.commit_unless_managed()
except Exception, e:
log.critical('Failed to update global stats: (%s): %s' % (p, e))
return
log.debug('Committed global stats details: (%s) has (%s) for (%s)'
% tuple(p))
@task
def update_global_totals(job, date, **kw):
log.info('Updating global statistics totals (%s) for (%s)' % (job, date))
jobs = _get_daily_jobs(date)
jobs.update(_get_metrics_jobs(date))
num = jobs[job]()
q = """REPLACE INTO global_stats (`name`, `count`, `date`)
VALUES (%s, %s, %s)"""
p = [job, num or 0, date]
try:
cursor = connection.cursor()
cursor.execute(q, p)
transaction.commit_unless_managed()
except Exception, e:
log.critical('Failed to update global stats: (%s): %s' % (p, e))
log.debug('Committed global stats details: (%s) has (%s) for (%s)'
% tuple(p))
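# update_global_totals() is presumably queued from a cron script with a job
# name taken from the dicts built below, e.g.:
#   update_global_totals.delay('addon_total_downloads', datetime.date.today())
# The name is looked up in _get_daily_jobs()/_get_metrics_jobs() and the
# matching callable is only invoked inside the task.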
def _get_daily_jobs(date=None):
"""Return a dictionary of statistics queries.
If a date is specified and applies to the job it will be used. Otherwise
the date will default to today().
"""
if not date:
date = datetime.date.today()
# Passing through a datetime would not generate an error,
# but would pass and give incorrect values.
if isinstance(date, datetime.datetime):
raise ValueError('This requires a valid date, not a datetime')
    # Testing on lte created date doesn't get you today's date; you need to
    # use less than the next date, because 2012-1-1 becomes 2012-1-1 00:00.
next_date = date + datetime.timedelta(days=1)
date_str = date.strftime('%Y-%m-%d')
extra = dict(where=['DATE(created)=%s'], params=[date_str])
# If you're editing these, note that you are returning a function! This
# cheesy hackery was done so that we could pass the queries to celery
# lazily and not hammer the db with a ton of these all at once.
stats = {
# Add-on Downloads
'addon_total_downloads': lambda: DownloadCount.objects.filter(
date__lt=next_date).aggregate(sum=Sum('count'))['sum'],
'addon_downloads_new': lambda: DownloadCount.objects.filter(
date=date).aggregate(sum=Sum('count'))['sum'],
# Add-on counts
'addon_count_new': Addon.objects.extra(**extra).count,
# Version counts
'version_count_new': Version.objects.extra(**extra).count,
# User counts
'user_count_total': UserProfile.objects.filter(
created__lt=next_date).count,
'user_count_new': UserProfile.objects.extra(**extra).count,
# Review counts
'review_count_total': Review.objects.filter(created__lte=date,
editorreview=0).count,
'review_count_new': Review.objects.filter(editorreview=0).extra(
**extra).count,
# Collection counts
'collection_count_total': Collection.objects.filter(
created__lt=next_date).count,
'collection_count_new': Collection.objects.extra(**extra).count,
'collection_count_autopublishers': Collection.objects.filter(
created__lt=next_date, type=amo.COLLECTION_SYNCHRONIZED).count,
'collection_addon_downloads': (lambda:
AddonCollectionCount.objects.filter(date__lte=date).aggregate(
sum=Sum('count'))['sum']),
}
# If we're processing today's stats, we'll do some extras. We don't do
    # these for re-processed stats because they change over time (e.g. add-ons
    # move from sandbox -> public).
if date == datetime.date.today():
stats.update({
'addon_count_experimental': Addon.objects.filter(
created__lte=date, status=amo.STATUS_UNREVIEWED,
disabled_by_user=0).count,
'addon_count_nominated': Addon.objects.filter(
created__lte=date, status=amo.STATUS_NOMINATED,
disabled_by_user=0).count,
'addon_count_public': Addon.objects.filter(
created__lte=date, status=amo.STATUS_PUBLIC,
disabled_by_user=0).count,
'addon_count_pending': Version.objects.filter(
created__lte=date, files__status=amo.STATUS_PENDING).count,
'collection_count_private': Collection.objects.filter(
created__lte=date, listed=0).count,
'collection_count_public': Collection.objects.filter(
created__lte=date, listed=1).count,
'collection_count_editorspicks': Collection.objects.filter(
created__lte=date, type=amo.COLLECTION_FEATURED).count,
'collection_count_normal': Collection.objects.filter(
created__lte=date, type=amo.COLLECTION_NORMAL).count,
})
return stats
def _get_metrics_jobs(date=None):
"""Return a dictionary of statistics queries.
If a date is specified and applies to the job it will be used. Otherwise
the date will default to the last date metrics put something in the db.
"""
if not date:
date = UpdateCount.objects.aggregate(max=Max('date'))['max']
# If you're editing these, note that you are returning a function!
stats = {
'addon_total_updatepings': lambda: UpdateCount.objects.filter(
date=date).aggregate(sum=Sum('count'))['sum'],
'collector_updatepings': lambda: UpdateCount.objects.get(
addon=11950, date=date).count,
}
return stats
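# The index_* tasks below share the same pattern: fetch the rows for the given
# ids, extract a search document for each, bulk-index it into every relevant
# ES index, flush, and on any failure retry the whole id batch.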
@task
def index_update_counts(ids, **kw):
index = kw.pop('index', None)
indices = get_indices(index)
es = amo.search.get_es()
qs = UpdateCount.objects.filter(id__in=ids)
if qs:
log.info('Indexing %s updates for %s.' % (qs.count(), qs[0].date))
try:
for update in qs:
key = '%s-%s' % (update.addon_id, update.date)
data = search.extract_update_count(update)
for index in indices:
UpdateCount.index(data, bulk=True, id=key, index=index)
es.flush_bulk(forced=True)
except Exception, exc:
index_update_counts.retry(args=[ids], exc=exc, **kw)
raise
@task
def index_download_counts(ids, **kw):
index = kw.pop('index', None)
indices = get_indices(index)
es = amo.search.get_es()
qs = DownloadCount.objects.filter(id__in=ids)
if qs:
log.info('Indexing %s downloads for %s.' % (qs.count(), qs[0].date))
try:
for dl in qs:
key = '%s-%s' % (dl.addon_id, dl.date)
data = search.extract_download_count(dl)
for index in indices:
DownloadCount.index(data, bulk=True, id=key, index=index)
es.flush_bulk(forced=True)
except Exception, exc:
index_download_counts.retry(args=[ids], exc=exc)
raise
@task
def index_collection_counts(ids, **kw):
index = kw.pop('index', None)
indices = get_indices(index)
es = amo.search.get_es()
qs = CollectionCount.objects.filter(collection__in=ids)
if qs:
log.info('Indexing %s addon collection counts: %s'
% (qs.count(), qs[0].date))
try:
for collection_count in qs:
collection = collection_count.collection_id
key = '%s-%s' % (collection, collection_count.date)
filters = dict(collection=collection,
date=collection_count.date)
data = search.extract_addon_collection(
collection_count,
AddonCollectionCount.objects.filter(**filters),
CollectionStats.objects.filter(**filters))
for index in indices:
CollectionCount.index(data, bulk=True, id=key, index=index)
es.flush_bulk(forced=True)
except Exception, exc:
index_collection_counts.retry(args=[ids], exc=exc)
raise
@task
def index_theme_user_counts(ids, **kw):
index = kw.pop('index', None)
indices = get_indices(index)
es = amo.search.get_es()
qs = ThemeUserCount.objects.filter(id__in=ids)
if qs:
log.info('Indexing %s theme user counts for %s.'
% (qs.count(), qs[0].date))
try:
for user_count in qs:
key = '%s-%s' % (user_count.addon_id, user_count.date)
data = search.extract_theme_user_count(user_count)
for index in indices:
ThemeUserCount.index(data, bulk=True, id=key, index=index)
es.flush_bulk(forced=True)
except Exception, exc:
index_theme_user_counts.retry(args=[ids], exc=exc)
raise
@task
def update_monolith_stats(metric, date, **kw):
log.info('Updating monolith statistics (%s) for (%s)' % (metric, date))
jobs = _get_monolith_jobs(date)[metric]
for job in jobs:
try:
# Only record if count is greater than zero.
count = job['count']()
if count:
value = {'count': count}
if 'dimensions' in job:
value.update(job['dimensions'])
MonolithRecord.objects.create(recorded=date, key=metric,
value=json.dumps(value))
log.debug('Monolith stats details: (%s) has (%s) for (%s). '
'Value: %s' % (metric, count, date, value))
except Exception as e:
log.critical('Update of monolith table failed: (%s): %s'
% ([metric, date], e))
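# Shape of the data consumed above, as documented in _get_monolith_jobs()
# below (the metric name and dimension values here are illustrative only):
#   {'apps_added_by_package_type': [
#       {'count': <callable>,
#        'dimensions': {'region': 'us', 'package_type': 'hosted'}}]}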
def _get_monolith_jobs(date=None):
"""
Return a dict of Monolith based statistics queries.
The dict is of the form::
{'<metric_name>': [{'count': <callable>, 'dimensions': <dimensions>}]}
Where `dimensions` is an optional dict of dimensions we expect to filter on
via Monolith.
If a date is specified and applies to the job it will be used. Otherwise
the date will default to today().
"""
if not date:
date = datetime.date.today()
# If we have a datetime make it a date so H/M/S isn't used.
if isinstance(date, datetime.datetime):
date = date.date()
next_date = date + datetime.timedelta(days=1)
stats = {
# Marketplace reviews.
'apps_review_count_new': [{
'count': Review.objects.filter(
created__range=(date, next_date), editorreview=0,
addon__type=amo.ADDON_WEBAPP).count,
}],
# New users
'mmo_user_count_total': [{
'count': UserProfile.objects.filter(
created__lt=next_date,
source=amo.LOGIN_SOURCE_MMO_BROWSERID).count,
}],
'mmo_user_count_new': [{
'count': UserProfile.objects.filter(
created__range=(date, next_date),
source=amo.LOGIN_SOURCE_MMO_BROWSERID).count,
}],
# New developers.
'mmo_developer_count_total': [{
'count': AddonUser.objects.filter(
addon__type=amo.ADDON_WEBAPP).values('user').distinct().count,
}],
# App counts.
'apps_count_new': [{
'count': Webapp.objects.filter(
created__range=(date, next_date)).count,
}],
}
# Add various "Apps Added" for all the dimensions we need.
apps = Webapp.objects.filter(created__range=(date, next_date))
package_counts = []
premium_counts = []
# privileged==packaged for our consideration.
package_types = amo.ADDON_WEBAPP_TYPES.copy()
package_types.pop(amo.ADDON_WEBAPP_PRIVILEGED)
for region_slug, region in REGIONS_CHOICES_SLUG:
# Apps added by package type and region.
for package_type in package_types.values():
package_counts.append({
'count': apps.filter(
is_packaged=package_type == 'packaged').exclude(
addonexcludedregion__region=region.id).count,
'dimensions': {'region': region_slug,
'package_type': package_type},
})
# Apps added by premium type and region.
for premium_type, pt_name in amo.ADDON_PREMIUM_API.items():
premium_counts.append({
'count': apps.filter(
premium_type=premium_type).exclude(
addonexcludedregion__region=region.id).count,
'dimensions': {'region': region_slug,
'premium_type': pt_name},
})
stats.update({'apps_added_by_package_type': package_counts})
stats.update({'apps_added_by_premium_type': premium_counts})
# Add various "Apps Available" for all the dimensions we need.
apps = Webapp.objects.filter(status=amo.STATUS_PUBLIC,
disabled_by_user=False)
package_counts = []
premium_counts = []
for region_slug, region in REGIONS_CHOICES_SLUG:
# Apps available by package type and region.
for package_type in package_types.values():
package_counts.append({
'count': apps.filter(
is_packaged=package_type == 'packaged').exclude(
addonexcludedregion__region=region.id).count,
'dimensions': {'region': region_slug,
'package_type': package_type},
})
# Apps available by premium type and region.
for premium_type, pt_name in amo.ADDON_PREMIUM_API.items():
premium_counts.append({
'count': apps.filter(
premium_type=premium_type).exclude(
addonexcludedregion__region=region.id).count,
'dimensions': {'region': region_slug,
'premium_type': pt_name},
})
stats.update({'apps_available_by_package_type': package_counts})
stats.update({'apps_available_by_premium_type': premium_counts})
return stats
| wagnerand/zamboni | apps/stats/tasks.py | Python | bsd-3-clause | 19,050 |
import hashlib
import logging
import os
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.db import transaction
from PIL import Image
from olympia import amo
from olympia.addons.models import (
Addon, attach_tags, attach_translations, AppSupport, CompatOverride,
IncompatibleVersions, Persona, Preview)
from olympia.addons.indexers import AddonIndexer
from olympia.amo.celery import task
from olympia.amo.decorators import set_modified_on, write
from olympia.amo.helpers import user_media_path
from olympia.amo.storage_utils import rm_stored_dir
from olympia.amo.utils import cache_ns_key, ImageCheck, LocalFileStorage
from olympia.editors.models import RereviewQueueTheme
from olympia.lib.es.utils import index_objects
from olympia.versions.models import Version
# pulling tasks from cron
from . import cron # noqa
log = logging.getLogger('z.task')
@task
@write
def version_changed(addon_id, **kw):
update_last_updated(addon_id)
update_appsupport([addon_id])
def update_last_updated(addon_id):
queries = Addon._last_updated_queries()
try:
addon = Addon.objects.get(pk=addon_id)
except Addon.DoesNotExist:
log.info('[1@None] Updating last updated for %s failed, no addon found'
% addon_id)
return
log.info('[1@None] Updating last updated for %s.' % addon_id)
if addon.is_persona():
q = 'personas'
elif addon.status == amo.STATUS_PUBLIC:
q = 'public'
else:
q = 'exp'
qs = queries[q].filter(pk=addon_id).using('default')
res = qs.values_list('id', 'last_updated')
if res:
pk, t = res[0]
Addon.objects.filter(pk=pk).update(last_updated=t)
@write
def update_appsupport(ids):
log.info("[%s@None] Updating appsupport for %s." % (len(ids), ids))
addons = Addon.objects.no_cache().filter(id__in=ids).no_transforms()
support = []
for addon in addons:
for app, appver in addon.compatible_apps.items():
if appver is None:
# Fake support for all version ranges.
min_, max_ = 0, 999999999999999999
else:
min_, max_ = appver.min.version_int, appver.max.version_int
support.append(AppSupport(addon=addon, app=app.id,
min=min_, max=max_))
if not support:
return
with transaction.atomic():
AppSupport.objects.filter(addon__id__in=ids).delete()
AppSupport.objects.bulk_create(support)
# All our updates were sql, so invalidate manually.
Addon.objects.invalidate(*addons)
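# update_appsupport() rebuilds the AppSupport rows for the given add-ons in
# one atomic block: old rows are deleted, new ones bulk-created, and the
# cached add-on objects are invalidated manually because the writes bypass the
# ORM save path.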
@task
def delete_preview_files(id, **kw):
log.info('[1@None] Removing preview with id of %s.' % id)
p = Preview(id=id)
for f in (p.thumbnail_path, p.image_path):
try:
storage.delete(f)
except Exception, e:
log.error('Error deleting preview file (%s): %s' % (f, e))
@task(acks_late=True)
def index_addons(ids, **kw):
log.info('Indexing addons %s-%s. [%s]' % (ids[0], ids[-1], len(ids)))
transforms = (attach_tags, attach_translations)
index_objects(ids, Addon, AddonIndexer.extract_document,
kw.pop('index', None), transforms, Addon.unfiltered)
@task
def unindex_addons(ids, **kw):
for addon in ids:
log.info('Removing addon [%s] from search index.' % addon)
Addon.unindex(addon)
@task
def delete_persona_image(dst, **kw):
log.info('[1@None] Deleting persona image: %s.' % dst)
if not dst.startswith(user_media_path('addons')):
log.error("Someone tried deleting something they shouldn't: %s" % dst)
return
try:
storage.delete(dst)
except Exception, e:
log.error('Error deleting persona image: %s' % e)
@set_modified_on
def create_persona_preview_images(src, full_dst, **kw):
"""
Creates a 680x100 thumbnail used for the Persona preview and
a 32x32 thumbnail used for search suggestions/detail pages.
"""
log.info('[1@None] Resizing persona images: %s' % full_dst)
preview, full = amo.PERSONA_IMAGE_SIZES['header']
preview_w, preview_h = preview
orig_w, orig_h = full
with storage.open(src) as fp:
i_orig = i = Image.open(fp)
# Crop image from the right.
i = i.crop((orig_w - (preview_w * 2), 0, orig_w, orig_h))
# Resize preview.
i = i.resize(preview, Image.ANTIALIAS)
i.load()
with storage.open(full_dst[0], 'wb') as fp:
i.save(fp, 'png')
_, icon_size = amo.PERSONA_IMAGE_SIZES['icon']
icon_w, icon_h = icon_size
# Resize icon.
i = i_orig
i.load()
i = i.crop((orig_w - (preview_h * 2), 0, orig_w, orig_h))
i = i.resize(icon_size, Image.ANTIALIAS)
i.load()
with storage.open(full_dst[1], 'wb') as fp:
i.save(fp, 'png')
return True
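# Both crops above keep the right-hand edge of the original header image: a
# strip two preview-widths wide is resized down to the preview, and a strip
# two preview-heights wide is resized down to the icon.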
@set_modified_on
def save_persona_image(src, full_dst, **kw):
"""Creates a PNG of a Persona header/footer image."""
log.info('[1@None] Saving persona image: %s' % full_dst)
img = ImageCheck(storage.open(src))
if not img.is_image():
log.error('Not an image: %s' % src, exc_info=True)
return
with storage.open(src, 'rb') as fp:
i = Image.open(fp)
with storage.open(full_dst, 'wb') as fp:
i.save(fp, 'png')
return True
@task
def update_incompatible_appversions(data, **kw):
"""Updates the incompatible_versions table for this version."""
log.info('Updating incompatible_versions for %s versions.' % len(data))
addon_ids = set()
for version_id in data:
# This is here to handle both post_save and post_delete hooks.
IncompatibleVersions.objects.filter(version=version_id).delete()
try:
version = Version.objects.get(pk=version_id)
except Version.DoesNotExist:
log.info('Version ID [%d] not found. Incompatible versions were '
'cleared.' % version_id)
return
addon_ids.add(version.addon_id)
try:
compat = CompatOverride.objects.get(addon=version.addon)
except CompatOverride.DoesNotExist:
log.info('Compat override for addon with version ID [%d] not '
'found. Incompatible versions were cleared.' % version_id)
return
app_ranges = []
ranges = compat.collapsed_ranges()
for range in ranges:
if range.min == '0' and range.max == '*':
# Wildcard range, add all app ranges
app_ranges.extend(range.apps)
else:
# Since we can't rely on add-on version numbers, get the min
# and max ID values and find versions whose ID is within those
# ranges, being careful with wildcards.
min_id = max_id = None
if range.min == '0':
versions = (Version.objects.filter(addon=version.addon_id)
.order_by('id')
.values_list('id', flat=True)[:1])
if versions:
min_id = versions[0]
else:
try:
min_id = Version.objects.get(addon=version.addon_id,
version=range.min).id
except Version.DoesNotExist:
pass
if range.max == '*':
versions = (Version.objects.filter(addon=version.addon_id)
.order_by('-id')
.values_list('id', flat=True)[:1])
if versions:
max_id = versions[0]
else:
try:
max_id = Version.objects.get(addon=version.addon_id,
version=range.max).id
except Version.DoesNotExist:
pass
if min_id and max_id:
if min_id <= version.id <= max_id:
app_ranges.extend(range.apps)
for app_range in app_ranges:
IncompatibleVersions.objects.create(version=version,
app=app_range.app.id,
min_app_version=app_range.min,
max_app_version=app_range.max)
log.info('Added incompatible version for version ID [%d]: '
'app:%d, %s -> %s' % (version_id, app_range.app.id,
app_range.min, app_range.max))
# Increment namespace cache of compat versions.
for addon_id in addon_ids:
cache_ns_key('d2c-versions:%s' % addon_id, increment=True)
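# Summary of the task above: for every changed version the existing
# IncompatibleVersions rows are wiped and rebuilt from the add-on's
# CompatOverride ranges. A '0'..'*' range applies to all of the override's
# app ranges; otherwise the range's min/max version strings are mapped to
# version ids and the current version must fall inside that id window.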
def make_checksum(header_path, footer_path):
ls = LocalFileStorage()
footer = footer_path and ls._open(footer_path).read() or ''
raw_checksum = ls._open(header_path).read() + footer
return hashlib.sha224(raw_checksum).hexdigest()
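# make_checksum() hashes the concatenated header and (optional) footer bytes
# with SHA-224; theme_checksum() and rereviewqueuetheme_checksum() below use
# it to detect duplicate persona images and record the matching persona.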
def theme_checksum(theme, **kw):
theme.checksum = make_checksum(theme.header_path, theme.footer_path)
dupe_personas = Persona.objects.filter(checksum=theme.checksum)
if dupe_personas.exists():
theme.dupe_persona = dupe_personas[0]
theme.save()
def rereviewqueuetheme_checksum(rqt, **kw):
"""Check for possible duplicate theme images."""
dupe_personas = Persona.objects.filter(
checksum=make_checksum(rqt.header_path or rqt.theme.header_path,
rqt.footer_path or rqt.theme.footer_path))
if dupe_personas.exists():
rqt.dupe_persona = dupe_personas[0]
rqt.save()
@task
@write
def save_theme(header, footer, addon, **kw):
"""Save theme image and calculates checksum after theme save."""
dst_root = os.path.join(user_media_path('addons'), str(addon.id))
header = os.path.join(settings.TMP_PATH, 'persona_header', header)
header_dst = os.path.join(dst_root, 'header.png')
if footer:
footer = os.path.join(settings.TMP_PATH, 'persona_footer', footer)
footer_dst = os.path.join(dst_root, 'footer.png')
try:
save_persona_image(src=header, full_dst=header_dst)
if footer:
save_persona_image(src=footer, full_dst=footer_dst)
create_persona_preview_images(
src=header, full_dst=[os.path.join(dst_root, 'preview.png'),
os.path.join(dst_root, 'icon.png')],
set_modified_on=[addon])
theme_checksum(addon.persona)
except IOError:
addon.delete()
raise
@task
@write
def save_theme_reupload(header, footer, addon, **kw):
header_dst = None
footer_dst = None
dst_root = os.path.join(user_media_path('addons'), str(addon.id))
try:
if header:
header = os.path.join(settings.TMP_PATH, 'persona_header', header)
header_dst = os.path.join(dst_root, 'pending_header.png')
save_persona_image(src=header, full_dst=header_dst)
if footer:
footer = os.path.join(settings.TMP_PATH, 'persona_footer', footer)
footer_dst = os.path.join(dst_root, 'pending_footer.png')
save_persona_image(src=footer, full_dst=footer_dst)
except IOError as e:
log.error(str(e))
raise
if header_dst or footer_dst:
theme = addon.persona
header = 'pending_header.png' if header_dst else theme.header
# Theme footer is optional, but can't be None.
footer = theme.footer or ''
if footer_dst:
footer = 'pending_footer.png'
# Store pending header and/or footer file paths for review.
RereviewQueueTheme.objects.filter(theme=theme).delete()
rqt = RereviewQueueTheme(theme=theme, header=header, footer=footer)
rereviewqueuetheme_checksum(rqt=rqt)
rqt.save()
@task
@write
def calc_checksum(theme_id, **kw):
"""For migration 596."""
lfs = LocalFileStorage()
theme = Persona.objects.get(id=theme_id)
header = theme.header_path
footer = theme.footer_path
# Delete invalid themes that are not images (e.g. PDF, EXE).
try:
Image.open(header)
Image.open(footer)
except IOError:
log.info('Deleting invalid theme [%s] (header: %s) (footer: %s)' %
(theme.addon.id, header, footer))
theme.addon.delete()
theme.delete()
rm_stored_dir(header.replace('header.png', ''), storage=lfs)
return
# Calculate checksum and save.
try:
theme.checksum = make_checksum(header, footer)
theme.save()
except IOError as e:
log.error(str(e))
| mstriemer/addons-server | src/olympia/addons/tasks.py | Python | bsd-3-clause | 12,888 |
from __future__ import absolute_import
from sentry.models import Activity
from sentry.testutils import APITestCase
class GroupNoteTest(APITestCase):
def test_simple(self):
group = self.group
activity = Activity.objects.create(
group=group,
project=group.project,
type=Activity.NOTE,
user=self.user,
data={'text': 'hello world'},
)
self.login_as(user=self.user)
url = '/api/0/issues/{}/comments/'.format(group.id)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]['id'] == str(activity.id)
class GroupNoteCreateTest(APITestCase):
def test_simple(self):
group = self.group
self.login_as(user=self.user)
url = '/api/0/issues/{}/comments/'.format(group.id)
response = self.client.post(url, format='json')
assert response.status_code == 400
response = self.client.post(url, format='json', data={
'text': 'hello world',
})
assert response.status_code == 201, response.content
activity = Activity.objects.get(id=response.data['id'])
assert activity.user == self.user
assert activity.group == group
assert activity.data == {'text': 'hello world'}
response = self.client.post(url, format='json', data={
'text': 'hello world',
})
assert response.status_code == 400, response.content
| nicholasserra/sentry | tests/sentry/api/endpoints/test_group_notes.py | Python | bsd-3-clause | 1,576 |
from __future__ import absolute_import
from celery.utils.dispatch.saferef import safe_ref
from celery.tests.utils import Case
class Class1(object):
def x(self):
pass
def fun(obj):
pass
class Class2(object):
def __call__(self, obj):
pass
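# The fixtures below exercise safe_ref() with bound methods (Class1().x), a
# plain function (fun) and callable instances (Class2()), presumably keeping
# strong references in self.ts so the weak references held in self.ss stay
# alive for the duration of each test.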
class SaferefTests(Case):
def setUp(self):
ts = []
ss = []
for x in xrange(5000):
t = Class1()
ts.append(t)
s = safe_ref(t.x, self._closure)
ss.append(s)
ts.append(fun)
ss.append(safe_ref(fun, self._closure))
for x in xrange(30):
t = Class2()
ts.append(t)
s = safe_ref(t, self._closure)
ss.append(s)
self.ts = ts
self.ss = ss
self.closureCount = 0
def tearDown(self):
del self.ts
del self.ss
def testIn(self):
"""Test the "in" operator for safe references (cmp)"""
for t in self.ts[:50]:
self.assertTrue(safe_ref(t.x) in self.ss)
def testValid(self):
"""Test that the references are valid (return instance methods)"""
for s in self.ss:
self.assertTrue(s())
def testShortCircuit(self):
"""Test that creation short-circuits to reuse existing references"""
sd = {}
for s in self.ss:
sd[s] = 1
for t in self.ts:
if hasattr(t, 'x'):
self.assertIn(safe_ref(t.x), sd)
else:
self.assertIn(safe_ref(t), sd)
def testRepresentation(self):
"""Test that the reference object's representation works
XXX Doesn't currently check the results, just that no error
is raised
"""
repr(self.ss[-1])
def _closure(self, ref):
"""Dumb utility mechanism to increment deletion counter"""
self.closureCount += 1
| mozilla/firefox-flicks | vendor-local/lib/python/celery/tests/utilities/test_saferef.py | Python | bsd-3-clause | 1,896 |